Compare commits
10 commits
696e1aafb9
...
be26313225
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
be26313225 | ||
|
|
043b1730dc | ||
|
|
a5bc6c5226 | ||
|
|
f3f2b046b7 | ||
|
|
d89d3100f6 | ||
|
|
fe03ec8f1a | ||
|
|
5a28a24294 | ||
|
|
ad81b9791a | ||
|
|
c545b2381d | ||
|
|
4b9d965fac |
46 changed files with 1786 additions and 67 deletions
121
.forgejo/workflows/ci-gates.yml
Normal file
121
.forgejo/workflows/ci-gates.yml
Normal file
|
|
@ -0,0 +1,121 @@
|
||||||
|
name: ci-gates
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request: {}
|
||||||
|
push:
|
||||||
|
branches: [main]
|
||||||
|
workflow_dispatch: {}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
checks:
|
||||||
|
if: ${{ github.server_url != 'https://codeberg.org' }}
|
||||||
|
runs-on: codeberg-medium-lazy
|
||||||
|
steps:
|
||||||
|
- name: Fetch source (no git required)
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ github.token }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
if [[ -z "${GITHUB_TOKEN:-}" ]]; then
|
||||||
|
echo "error: missing github.token"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if ! command -v curl >/dev/null 2>&1; then
|
||||||
|
echo "error: curl is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if ! command -v tar >/dev/null 2>&1; then
|
||||||
|
echo "error: tar is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_SHA:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SHA"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_SERVER_URL:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SERVER_URL"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_REPOSITORY"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -rf .repo
|
||||||
|
mkdir -p .repo
|
||||||
|
curl -fsSL -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/archive/${GITHUB_SHA}.tar.gz?rev=${GITHUB_SHA}" \
|
||||||
|
-o .repo/src.tgz
|
||||||
|
tar -xzf .repo/src.tgz -C .repo --strip-components=1
|
||||||
|
rm -f .repo/src.tgz
|
||||||
|
|
||||||
|
- name: Bootstrap Rust + web build tools
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
install -d -m 755 "$HOME/.local/bin"
|
||||||
|
echo "PATH=$HOME/.local/bin:$PATH" >> "$GITHUB_ENV"
|
||||||
|
export PATH="$HOME/.local/bin:$PATH"
|
||||||
|
|
||||||
|
if ! command -v curl >/dev/null 2>&1; then
|
||||||
|
echo "error: curl is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! command -v cargo >/dev/null 2>&1; then
|
||||||
|
curl -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
elif [[ -f "$HOME/.cargo/env" ]]; then
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
fi
|
||||||
|
|
||||||
|
rustup target add wasm32-unknown-unknown
|
||||||
|
|
||||||
|
if ! command -v trunk >/dev/null 2>&1; then
|
||||||
|
trunk_version="0.21.14"
|
||||||
|
arch="$(uname -m)"
|
||||||
|
case "${arch}" in
|
||||||
|
x86_64|amd64) trunk_target="x86_64-unknown-linux-gnu" ;;
|
||||||
|
aarch64|arm64) trunk_target="aarch64-unknown-linux-gnu" ;;
|
||||||
|
*)
|
||||||
|
echo "error: unsupported runner arch for trunk prebuilt binary: ${arch}"
|
||||||
|
exit 2
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
curl -fsSL "https://github.com/trunk-rs/trunk/releases/download/v${trunk_version}/trunk-${trunk_target}.tar.gz" \
|
||||||
|
| tar -xz -C "$HOME/.local/bin" trunk
|
||||||
|
fi
|
||||||
|
|
||||||
|
cargo --version
|
||||||
|
rustc --version
|
||||||
|
trunk --version
|
||||||
|
|
||||||
|
- name: ECP lint
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
bash ./scripts/ecp-lint.sh
|
||||||
|
|
||||||
|
- name: Rust tests (core subset)
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
if [[ -f "$HOME/.cargo/env" ]]; then
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
fi
|
||||||
|
cargo test -p ec-core -p ec-crypto -p ec-moq -p ec-iroh -p ec-linux-iptv
|
||||||
|
|
||||||
|
- name: Build web (apps/web)
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
if [[ -f "$HOME/.cargo/env" ]]; then
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
fi
|
||||||
|
cd apps/web
|
||||||
|
env -u NO_COLOR trunk build --release --public-url /
|
||||||
|
|
@ -10,7 +10,8 @@ concurrency:
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
deploy:
|
checks:
|
||||||
|
if: ${{ github.server_url != 'https://codeberg.org' }}
|
||||||
runs-on: codeberg-medium-lazy
|
runs-on: codeberg-medium-lazy
|
||||||
steps:
|
steps:
|
||||||
- name: Fetch Source (no git required)
|
- name: Fetch Source (no git required)
|
||||||
|
|
@ -35,13 +36,131 @@ jobs:
|
||||||
echo "error: missing GITHUB_SHA"
|
echo "error: missing GITHUB_SHA"
|
||||||
exit 2
|
exit 2
|
||||||
fi
|
fi
|
||||||
|
if [[ -z "${GITHUB_SERVER_URL:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SERVER_URL"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_REPOSITORY"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -rf .repo
|
||||||
|
mkdir -p .repo
|
||||||
|
|
||||||
|
curl -fsSL -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/archive/${GITHUB_SHA}.tar.gz?rev=${GITHUB_SHA}" \
|
||||||
|
-o .repo/src.tgz
|
||||||
|
tar -xzf .repo/src.tgz -C .repo --strip-components=1
|
||||||
|
rm -f .repo/src.tgz
|
||||||
|
|
||||||
|
- name: Bootstrap Rust + web build tools
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
install -d -m 755 "$HOME/.local/bin"
|
||||||
|
echo "PATH=$HOME/.local/bin:$PATH" >> "$GITHUB_ENV"
|
||||||
|
export PATH="$HOME/.local/bin:$PATH"
|
||||||
|
|
||||||
|
if ! command -v curl >/dev/null 2>&1; then
|
||||||
|
echo "error: curl is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! command -v cargo >/dev/null 2>&1; then
|
||||||
|
curl -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
elif [[ -f "$HOME/.cargo/env" ]]; then
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
fi
|
||||||
|
rustup target add wasm32-unknown-unknown
|
||||||
|
|
||||||
|
if ! command -v trunk >/dev/null 2>&1; then
|
||||||
|
trunk_version="0.21.14"
|
||||||
|
arch="$(uname -m)"
|
||||||
|
case "${arch}" in
|
||||||
|
x86_64|amd64) trunk_target="x86_64-unknown-linux-gnu" ;;
|
||||||
|
aarch64|arm64) trunk_target="aarch64-unknown-linux-gnu" ;;
|
||||||
|
*)
|
||||||
|
echo "error: unsupported runner arch for trunk prebuilt binary: ${arch}"
|
||||||
|
exit 2
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
curl -fsSL "https://github.com/trunk-rs/trunk/releases/download/v${trunk_version}/trunk-${trunk_target}.tar.gz" \
|
||||||
|
| tar -xz -C "$HOME/.local/bin" trunk
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: ECP lint
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
bash ./scripts/ecp-lint.sh
|
||||||
|
|
||||||
|
- name: Rust tests (core subset)
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
if [[ -f "$HOME/.cargo/env" ]]; then
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
fi
|
||||||
|
cargo test -p ec-core -p ec-crypto -p ec-moq -p ec-iroh -p ec-linux-iptv
|
||||||
|
|
||||||
|
- name: Build site (web)
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
if [[ -f "$HOME/.cargo/env" ]]; then
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
fi
|
||||||
|
cd apps/web
|
||||||
|
env -u NO_COLOR trunk build --release --public-url /
|
||||||
|
|
||||||
|
deploy:
|
||||||
|
if: ${{ github.server_url != 'https://codeberg.org' }}
|
||||||
|
needs: checks
|
||||||
|
runs-on: codeberg-medium-lazy
|
||||||
|
steps:
|
||||||
|
- name: Fetch Source (no git required)
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ github.token }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
if [[ -z "${GITHUB_TOKEN:-}" ]]; then
|
||||||
|
echo "error: missing github.token"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if ! command -v curl >/dev/null 2>&1; then
|
||||||
|
echo "error: curl is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if ! command -v tar >/dev/null 2>&1; then
|
||||||
|
echo "error: tar is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_SHA:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SHA"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_SERVER_URL:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SERVER_URL"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_REPOSITORY"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
rm -rf .repo
|
rm -rf .repo
|
||||||
mkdir -p .repo
|
mkdir -p .repo
|
||||||
|
|
||||||
# Use the authenticated API archive endpoint (works for private repos).
|
# Use the authenticated API archive endpoint (works for private repos).
|
||||||
curl -fsSL -H "Authorization: token ${GITHUB_TOKEN}" \
|
curl -fsSL -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
"https://codeberg.org/api/v1/repos/every-channel/every.channel/archive/${GITHUB_SHA}.tar.gz?rev=${GITHUB_SHA}" \
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/archive/${GITHUB_SHA}.tar.gz?rev=${GITHUB_SHA}" \
|
||||||
-o .repo/src.tgz
|
-o .repo/src.tgz
|
||||||
tar -xzf .repo/src.tgz -C .repo --strip-components=1
|
tar -xzf .repo/src.tgz -C .repo --strip-components=1
|
||||||
rm -f .repo/src.tgz
|
rm -f .repo/src.tgz
|
||||||
|
|
@ -110,7 +229,7 @@ jobs:
|
||||||
cd .repo
|
cd .repo
|
||||||
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
-H "content-type: application/json" \
|
-H "content-type: application/json" \
|
||||||
"https://codeberg.org/api/v1/repos/every-channel/every.channel/statuses/${GITHUB_SHA}" \
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/statuses/${GITHUB_SHA}" \
|
||||||
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"bootstrap ok"}' >/dev/null
|
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"bootstrap ok"}' >/dev/null
|
||||||
|
|
||||||
- name: Configure CI Age identity
|
- name: Configure CI Age identity
|
||||||
|
|
@ -135,7 +254,7 @@ jobs:
|
||||||
|
|
||||||
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
-H "content-type: application/json" \
|
-H "content-type: application/json" \
|
||||||
"https://codeberg.org/api/v1/repos/every-channel/every.channel/statuses/${GITHUB_SHA}" \
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/statuses/${GITHUB_SHA}" \
|
||||||
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"age key ok"}' >/dev/null
|
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"age key ok"}' >/dev/null
|
||||||
|
|
||||||
- name: Decrypt CI secrets from repo
|
- name: Decrypt CI secrets from repo
|
||||||
|
|
@ -161,7 +280,7 @@ jobs:
|
||||||
|
|
||||||
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
-H "content-type: application/json" \
|
-H "content-type: application/json" \
|
||||||
"https://codeberg.org/api/v1/repos/every-channel/every.channel/statuses/${GITHUB_SHA}" \
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/statuses/${GITHUB_SHA}" \
|
||||||
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"decrypt ok"}' >/dev/null
|
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"decrypt ok"}' >/dev/null
|
||||||
|
|
||||||
- name: Build site (web)
|
- name: Build site (web)
|
||||||
|
|
@ -200,7 +319,7 @@ jobs:
|
||||||
|
|
||||||
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
-H "content-type: application/json" \
|
-H "content-type: application/json" \
|
||||||
"https://codeberg.org/api/v1/repos/every-channel/every.channel/statuses/${GITHUB_SHA}" \
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/statuses/${GITHUB_SHA}" \
|
||||||
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"build ok"}' >/dev/null
|
-d '{"context":"deploy-cloudflare/breadcrumb","state":"pending","description":"build ok"}' >/dev/null
|
||||||
|
|
||||||
- name: Deploy worker
|
- name: Deploy worker
|
||||||
|
|
@ -216,5 +335,5 @@ jobs:
|
||||||
|
|
||||||
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
curl -fsSL -X POST -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
-H "content-type: application/json" \
|
-H "content-type: application/json" \
|
||||||
"https://codeberg.org/api/v1/repos/every-channel/every.channel/statuses/${GITHUB_SHA}" \
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/statuses/${GITHUB_SHA}" \
|
||||||
-d '{"context":"deploy-cloudflare/breadcrumb","state":"success","description":"deploy ok"}' >/dev/null
|
-d '{"context":"deploy-cloudflare/breadcrumb","state":"success","description":"deploy ok"}' >/dev/null
|
||||||
|
|
|
||||||
276
.forgejo/workflows/deploy-runner-images.yml
Normal file
276
.forgejo/workflows/deploy-runner-images.yml
Normal file
|
|
@ -0,0 +1,276 @@
|
||||||
|
name: deploy-runner-images
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags: [boot-v*]
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
release_tag:
|
||||||
|
description: "Release tag override (manual runs only)"
|
||||||
|
required: false
|
||||||
|
default: ""
|
||||||
|
publish_release:
|
||||||
|
description: "Publish artifacts to Forgejo release (true/false)"
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
build_x86_64_netboot:
|
||||||
|
description: "Build x86_64 netboot tarball (true/false)"
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
build_x86_64_iso:
|
||||||
|
description: "Build x86_64 installer ISO (true/false)"
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: runner-image-deploy-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-and-release:
|
||||||
|
if: ${{ github.server_url != 'https://codeberg.org' }}
|
||||||
|
runs-on: codeberg-medium-lazy
|
||||||
|
steps:
|
||||||
|
- name: Fetch source (no git required)
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ github.token }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
if [[ -z "${GITHUB_TOKEN:-}" ]]; then
|
||||||
|
echo "error: missing github.token"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if ! command -v curl >/dev/null 2>&1; then
|
||||||
|
echo "error: curl is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if ! command -v tar >/dev/null 2>&1; then
|
||||||
|
echo "error: tar is required"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_SHA:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SHA"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_SERVER_URL:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SERVER_URL"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_REPOSITORY"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -rf .repo
|
||||||
|
mkdir -p .repo
|
||||||
|
curl -fsSL -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
|
"${GITHUB_SERVER_URL}/api/v1/repos/${GITHUB_REPOSITORY}/archive/${GITHUB_SHA}.tar.gz?rev=${GITHUB_SHA}" \
|
||||||
|
-o .repo/src.tgz
|
||||||
|
tar -xzf .repo/src.tgz -C .repo --strip-components=1
|
||||||
|
rm -f .repo/src.tgz
|
||||||
|
|
||||||
|
- name: Bootstrap Nix
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
if ! command -v nix >/dev/null 2>&1; then
|
||||||
|
curl -fsSL https://nixos.org/nix/install -o /tmp/install-nix.sh
|
||||||
|
sh /tmp/install-nix.sh --no-daemon --yes
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]]; then
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
|
||||||
|
fi
|
||||||
|
if [[ -d "$HOME/.nix-profile/bin" ]]; then
|
||||||
|
echo "PATH=$HOME/.nix-profile/bin:$PATH" >> "$GITHUB_ENV"
|
||||||
|
export PATH="$HOME/.nix-profile/bin:$PATH"
|
||||||
|
fi
|
||||||
|
nix --version
|
||||||
|
|
||||||
|
- name: Resolve build plan
|
||||||
|
id: plan
|
||||||
|
env:
|
||||||
|
INPUT_RELEASE_TAG: ${{ github.event.inputs.release_tag }}
|
||||||
|
INPUT_PUBLISH_RELEASE: ${{ github.event.inputs.publish_release }}
|
||||||
|
INPUT_BUILD_X86_64_NETBOOT: ${{ github.event.inputs.build_x86_64_netboot }}
|
||||||
|
INPUT_BUILD_X86_64_ISO: ${{ github.event.inputs.build_x86_64_iso }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
bool_norm() {
|
||||||
|
local raw
|
||||||
|
raw="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')"
|
||||||
|
case "${raw}" in
|
||||||
|
''|true|1|yes|y|on) echo "true" ;;
|
||||||
|
false|0|no|n|off) echo "false" ;;
|
||||||
|
*)
|
||||||
|
echo "error: invalid boolean value '${1}'" >&2
|
||||||
|
exit 2
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
short_sha="${GITHUB_SHA:0:12}"
|
||||||
|
if [[ "${GITHUB_REF:-}" == refs/tags/* ]]; then
|
||||||
|
release_tag="${GITHUB_REF#refs/tags/}"
|
||||||
|
else
|
||||||
|
release_tag="${INPUT_RELEASE_TAG:-}"
|
||||||
|
if [[ -z "${release_tag}" ]]; then
|
||||||
|
release_tag="boot-${short_sha}"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! "${release_tag}" =~ ^[A-Za-z0-9._-]+$ ]]; then
|
||||||
|
echo "error: release tag contains unsupported characters: ${release_tag}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
publish_release="$(bool_norm "${INPUT_PUBLISH_RELEASE:-true}")"
|
||||||
|
build_x86_64_netboot="$(bool_norm "${INPUT_BUILD_X86_64_NETBOOT:-true}")"
|
||||||
|
build_x86_64_iso="$(bool_norm "${INPUT_BUILD_X86_64_ISO:-true}")"
|
||||||
|
|
||||||
|
if [[ "${build_x86_64_netboot}" != "true" && "${build_x86_64_iso}" != "true" ]]; then
|
||||||
|
echo "error: at least one image build must be enabled" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
artifact_suffix="${short_sha}-${GITHUB_RUN_NUMBER:-0}-${GITHUB_RUN_ATTEMPT:-1}"
|
||||||
|
|
||||||
|
echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "publish_release=${publish_release}" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "build_x86_64_netboot=${build_x86_64_netboot}" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "build_x86_64_iso=${build_x86_64_iso}" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "artifact_suffix=${artifact_suffix}" >> "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
|
- name: Build runner boot images
|
||||||
|
env:
|
||||||
|
BUILD_X86_64_NETBOOT: ${{ steps.plan.outputs.build_x86_64_netboot }}
|
||||||
|
BUILD_X86_64_ISO: ${{ steps.plan.outputs.build_x86_64_iso }}
|
||||||
|
ARTIFACT_SUFFIX: ${{ steps.plan.outputs.artifact_suffix }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
|
||||||
|
if [[ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]]; then
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
|
||||||
|
fi
|
||||||
|
export PATH="$HOME/.nix-profile/bin:$PATH"
|
||||||
|
|
||||||
|
artifacts_dir="$PWD/.artifacts"
|
||||||
|
rm -rf "${artifacts_dir}"
|
||||||
|
mkdir -p "${artifacts_dir}"
|
||||||
|
|
||||||
|
nix_args=(--accept-flake-config --extra-experimental-features "nix-command flakes")
|
||||||
|
|
||||||
|
if [[ "${BUILD_X86_64_NETBOOT}" == "true" ]]; then
|
||||||
|
nix build "${nix_args[@]}" \
|
||||||
|
.#nixosConfigurations.ec-runner-x86_64-netboot.config.system.build.netboot \
|
||||||
|
-o result-netboot-x86_64
|
||||||
|
tar -C result-netboot-x86_64 \
|
||||||
|
-czf "${artifacts_dir}/ec-runner-x86_64-netboot-${ARTIFACT_SUFFIX}.tar.gz" \
|
||||||
|
kernel initrd netboot.ipxe
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${BUILD_X86_64_ISO}" == "true" ]]; then
|
||||||
|
nix build "${nix_args[@]}" \
|
||||||
|
.#nixosConfigurations.ec-runner-x86_64-iso.config.system.build.isoImage \
|
||||||
|
-o result-iso-x86_64
|
||||||
|
iso_source=""
|
||||||
|
if [[ -f result-iso-x86_64 ]]; then
|
||||||
|
iso_source="result-iso-x86_64"
|
||||||
|
else
|
||||||
|
iso_source="$(find -L result-iso-x86_64 -type f -name '*.iso' | head -n 1 || true)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${iso_source}" ]]; then
|
||||||
|
echo "error: could not locate ISO output from result-iso-x86_64" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
cp -f "${iso_source}" "${artifacts_dir}/ec-runner-x86_64-iso-${ARTIFACT_SUFFIX}.iso"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! find "${artifacts_dir}" -maxdepth 1 -type f | grep -q .; then
|
||||||
|
echo "error: no image artifacts were produced" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
(
|
||||||
|
cd "${artifacts_dir}"
|
||||||
|
sha256sum -- * > SHA256SUMS.txt
|
||||||
|
ls -lh
|
||||||
|
)
|
||||||
|
|
||||||
|
- name: Publish artifacts to Forgejo release
|
||||||
|
if: ${{ steps.plan.outputs.publish_release == 'true' }}
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ github.token }}
|
||||||
|
RELEASE_TAG: ${{ steps.plan.outputs.release_tag }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd .repo
|
||||||
|
|
||||||
|
if [[ -z "${GITHUB_TOKEN:-}" ]]; then
|
||||||
|
echo "error: missing github.token"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_SERVER_URL:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_SERVER_URL"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then
|
||||||
|
echo "error: missing GITHUB_REPOSITORY"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
api_base="${GITHUB_SERVER_URL%/}/api/v1/repos/${GITHUB_REPOSITORY}"
|
||||||
|
release_json="$(curl -fsSL \
|
||||||
|
-H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
|
"${api_base}/releases/tags/${RELEASE_TAG}" 2>/dev/null || true)"
|
||||||
|
|
||||||
|
if [[ -z "${release_json}" ]]; then
|
||||||
|
payload="$(cat <<JSON
|
||||||
|
{
|
||||||
|
"tag_name": "${RELEASE_TAG}",
|
||||||
|
"name": "Boot images ${RELEASE_TAG}",
|
||||||
|
"body": "Automated runner boot image build from ${GITHUB_SHA}.",
|
||||||
|
"draft": false,
|
||||||
|
"prerelease": false
|
||||||
|
}
|
||||||
|
JSON
|
||||||
|
)"
|
||||||
|
release_json="$(curl -fsSL -X POST \
|
||||||
|
-H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
|
-H "content-type: application/json" \
|
||||||
|
"${api_base}/releases" \
|
||||||
|
-d "${payload}")"
|
||||||
|
fi
|
||||||
|
|
||||||
|
release_id=""
|
||||||
|
if command -v python3 >/dev/null 2>&1; then
|
||||||
|
release_id="$(python3 -c 'import json,sys; print(json.load(sys.stdin)["id"])' <<<"${release_json}" 2>/dev/null || true)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${release_id}" ]]; then
|
||||||
|
release_id="$(printf '%s' "${release_json}" \
|
||||||
|
| sed -nE 's/.*"id"[[:space:]]*:[[:space:]]*([0-9]+).*/\1/p' \
|
||||||
|
| head -n 1)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${release_id}" ]]; then
|
||||||
|
echo "error: failed to resolve release id for ${RELEASE_TAG}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
for asset_path in .artifacts/*; do
|
||||||
|
[[ -f "${asset_path}" ]] || continue
|
||||||
|
asset_name="$(basename "${asset_path}")"
|
||||||
|
curl -fsSL -X POST \
|
||||||
|
-H "Authorization: token ${GITHUB_TOKEN}" \
|
||||||
|
-H "content-type: application/octet-stream" \
|
||||||
|
--data-binary @"${asset_path}" \
|
||||||
|
"${api_base}/releases/${release_id}/assets?name=${asset_name}" >/dev/null
|
||||||
|
echo "uploaded: ${asset_name}"
|
||||||
|
done
|
||||||
|
|
@ -5,6 +5,7 @@ on:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
smoke:
|
smoke:
|
||||||
|
if: ${{ github.server_url != 'https://codeberg.org' }}
|
||||||
runs-on: codeberg-medium-lazy
|
runs-on: codeberg-medium-lazy
|
||||||
steps:
|
steps:
|
||||||
- name: Basic runner + secret smoke test
|
- name: Basic runner + secret smoke test
|
||||||
|
|
|
||||||
12
README.md
12
README.md
|
|
@ -47,6 +47,18 @@ Runbook:
|
||||||
cat docs/USAGE.md
|
cat docs/USAGE.md
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Git hosting topology:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cat docs/GIT_HOSTING.md
|
||||||
|
```
|
||||||
|
|
||||||
|
NUC PXE rollout (Unifi + ProxyDHCP):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cat docs/NUC_UNIFI_NETBOOT.md
|
||||||
|
```
|
||||||
|
|
||||||
## WebTransport Watch (MoQ)
|
## WebTransport Watch (MoQ)
|
||||||
|
|
||||||
Publish (node -> Cloudflare relay):
|
Publish (node -> Cloudflare relay):
|
||||||
|
|
|
||||||
|
|
@ -162,6 +162,7 @@ function mountPlayer(relayUrl, name) {
|
||||||
watch.setAttribute("name", name);
|
watch.setAttribute("name", name);
|
||||||
watch.setAttribute("path", name);
|
watch.setAttribute("path", name);
|
||||||
watch.setAttribute("volume", "1");
|
watch.setAttribute("volume", "1");
|
||||||
|
watch.setAttribute("muted", "");
|
||||||
|
|
||||||
// Force WebTransport in-browser; websocket fallback has shown degraded
|
// Force WebTransport in-browser; websocket fallback has shown degraded
|
||||||
// media behavior (especially audio) against public relay paths.
|
// media behavior (especially audio) against public relay paths.
|
||||||
|
|
@ -169,15 +170,16 @@ function mountPlayer(relayUrl, name) {
|
||||||
watch.connection.websocket = { enabled: false };
|
watch.connection.websocket = { enabled: false };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use a media element for live playback so browser audio controls/policies apply naturally.
|
// Prefer a video element for native controls/audio routing.
|
||||||
|
// Start muted to satisfy autoplay policy, then unlock audio on user gesture.
|
||||||
const video = document.createElement("video");
|
const video = document.createElement("video");
|
||||||
video.className = "archiveVideo";
|
video.className = "archiveVideo";
|
||||||
video.controls = true;
|
video.controls = true;
|
||||||
video.autoplay = true;
|
video.autoplay = true;
|
||||||
video.muted = false;
|
video.muted = true;
|
||||||
|
video.volume = 1;
|
||||||
video.playsInline = true;
|
video.playsInline = true;
|
||||||
watch.appendChild(video);
|
watch.appendChild(video);
|
||||||
|
|
||||||
mount.appendChild(watch);
|
mount.appendChild(watch);
|
||||||
const forceAudioOn = () => {
|
const forceAudioOn = () => {
|
||||||
try {
|
try {
|
||||||
|
|
@ -187,9 +189,19 @@ function mountPlayer(relayUrl, name) {
|
||||||
// Best effort only.
|
// Best effort only.
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
const unlockAudio = () => {
|
||||||
forceAudioOn();
|
forceAudioOn();
|
||||||
window.setTimeout(forceAudioOn, 1000);
|
watch.backend?.paused?.set?.(true);
|
||||||
window.setTimeout(forceAudioOn, 4000);
|
watch.backend?.paused?.set?.(false);
|
||||||
|
video.muted = false;
|
||||||
|
video.volume = 1;
|
||||||
|
void video.play().catch(() => {});
|
||||||
|
setHint(`Live: subscribed to ${name} (audio unlocked)`, "ok");
|
||||||
|
};
|
||||||
|
document.addEventListener("pointerdown", unlockAudio, { once: true });
|
||||||
|
video.addEventListener("pointerdown", unlockAudio, { once: true });
|
||||||
|
setHint(`Live: subscribed to ${name} (tap video to unmute)`, "warn");
|
||||||
|
void video.play().catch(() => {});
|
||||||
bindPlayerSignals(watch, name);
|
bindPlayerSignals(watch, name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -6312,6 +6312,10 @@ async fn wt_publish(args: WtPublishArgs) -> Result<()> {
|
||||||
|
|
||||||
if args.transcode {
|
if args.transcode {
|
||||||
cmd.args([
|
cmd.args([
|
||||||
|
"-map",
|
||||||
|
"0:v:0",
|
||||||
|
"-map",
|
||||||
|
"0:a:0?",
|
||||||
"-c:v",
|
"-c:v",
|
||||||
"libx264",
|
"libx264",
|
||||||
"-preset",
|
"-preset",
|
||||||
|
|
@ -6332,8 +6336,10 @@ async fn wt_publish(args: WtPublishArgs) -> Result<()> {
|
||||||
"1",
|
"1",
|
||||||
"-c:a",
|
"-c:a",
|
||||||
"aac",
|
"aac",
|
||||||
|
"-profile:a",
|
||||||
|
"aac_low",
|
||||||
"-b:a",
|
"-b:a",
|
||||||
"128k",
|
"160k",
|
||||||
"-ac",
|
"-ac",
|
||||||
"2",
|
"2",
|
||||||
"-ar",
|
"-ar",
|
||||||
|
|
|
||||||
37
docs/BRANCH_PROTECTION.md
Normal file
37
docs/BRANCH_PROTECTION.md
Normal file
|
|
@ -0,0 +1,37 @@
|
||||||
|
# Branch Protection (Forgejo Primary)
|
||||||
|
|
||||||
|
`main` should be protected to satisfy constitutional governance (`all changes merge through pull requests`) and to require CI before merge.
|
||||||
|
|
||||||
|
## Required settings
|
||||||
|
|
||||||
|
- Protected branch: `main`
|
||||||
|
- Direct pushes disabled
|
||||||
|
- Required approvals: `1` (or stricter)
|
||||||
|
- Required status checks:
|
||||||
|
- `ci-gates / checks`
|
||||||
|
- Require signed commits: enabled
|
||||||
|
|
||||||
|
## Apply via script
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./scripts/fj-enforce-branch-protection.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Optional overrides:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
EVERY_CHANNEL_FORGE_HOST=https://forge.every.channel \
|
||||||
|
EVERY_CHANNEL_FORGE_REPO=every-channel/every.channel \
|
||||||
|
EVERY_CHANNEL_PROTECTED_BRANCH=main \
|
||||||
|
EVERY_CHANNEL_REQUIRED_CHECKS="ci-gates / checks" \
|
||||||
|
EVERY_CHANNEL_REQUIRED_APPROVALS=1 \
|
||||||
|
./scripts/fj-enforce-branch-protection.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Token source order:
|
||||||
|
|
||||||
|
1. `EVERY_CHANNEL_FORGE_TOKEN` / `FORGE_TOKEN` / `CODEBERG_TOKEN` env var
|
||||||
|
2. `secrets/forge-token.age` (preferred) via `agenix` or `age`
|
||||||
|
3. `secrets/codeberg-token.age` (compat) via `agenix` or `age`
|
||||||
|
|
||||||
|
The token must have repository admin scope to edit branch protection.
|
||||||
|
|
@ -1,22 +1,25 @@
|
||||||
# Cloudflare Deploy (Forgejo Actions)
|
# Cloudflare Deploy (Forgejo Actions)
|
||||||
|
|
||||||
This repo deploys `https://every.channel` via Wrangler.
|
This repo deploys `https://every.channel` via Wrangler.
|
||||||
|
The deploy workflow is intended to run on the primary Forgejo host (not Codeberg/GitHub mirrors).
|
||||||
|
|
||||||
## Prereqs
|
## Prereqs
|
||||||
|
|
||||||
- Forgejo Actions enabled on the repo.
|
- Forgejo Actions enabled on the repo.
|
||||||
- A Cloudflare API token stored as a Forgejo Actions secret:
|
- Forgejo Actions secret `AGE_FORGE_SSH_KEY` set to the SSH private key used to decrypt repo-encrypted age secrets.
|
||||||
- name: `CLOUDFLARE_API_TOKEN`
|
- `secrets/cloudflare-api-token.age` present in-repo and decryptable by `AGE_FORGE_SSH_KEY`.
|
||||||
|
|
||||||
The workflow is defined in `.forgejo/workflows/deploy-cloudflare.yml`.
|
CI and deploy workflows:
|
||||||
|
|
||||||
|
- PR/main checks: `.forgejo/workflows/ci-gates.yml`
|
||||||
|
- Deploy (main only, depends on checks): `.forgejo/workflows/deploy-cloudflare.yml`
|
||||||
|
|
||||||
|
Mirror behavior:
|
||||||
|
|
||||||
|
- Workflow jobs are guarded to skip execution on `https://codeberg.org`.
|
||||||
|
|
||||||
## Manual deploy (local)
|
## Manual deploy (local)
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd apps/tauri/ui
|
./scripts/deploy-workers.sh
|
||||||
trunk build --release --public-url /
|
|
||||||
|
|
||||||
cd deploy/cloudflare-worker
|
|
||||||
npm ci
|
|
||||||
npm run deploy
|
|
||||||
```
|
```
|
||||||
|
|
|
||||||
45
docs/GIT_HOSTING.md
Normal file
45
docs/GIT_HOSTING.md
Normal file
|
|
@@ -0,0 +1,45 @@
|
||||||
|
# Git Hosting Topology
|
||||||
|
|
||||||
|
Primary host:
|
||||||
|
|
||||||
|
- Forgejo (`origin`)
|
||||||
|
|
||||||
|
Mirrors (push-only):
|
||||||
|
|
||||||
|
- Codeberg (`mirror-codeberg`)
|
||||||
|
- GitHub (`mirror-github`)
|
||||||
|
|
||||||
|
Codeberg and GitHub are distribution mirrors only. CI/actions should run on the Forgejo primary.
|
||||||
|
|
||||||
|
## Configure local remotes
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./scripts/git-configure-hosting.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Defaults:
|
||||||
|
|
||||||
|
- `origin`: `git@forge.every.channel:every-channel/every.channel.git`
|
||||||
|
- `mirror-codeberg`: `git@codeberg.org:every-channel/every.channel.git`
|
||||||
|
- `mirror-github`: `git@github.com:every-channel/every.channel.git`
|
||||||
|
|
||||||
|
You can override via env vars:
|
||||||
|
|
||||||
|
- `EVERY_CHANNEL_PRIMARY_GIT_URL`
|
||||||
|
- `EVERY_CHANNEL_CODEBERG_GIT_URL`
|
||||||
|
- `EVERY_CHANNEL_GITHUB_GIT_URL`
|
||||||
|
|
||||||
|
## Push mirrors
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./scripts/git-push-mirrors.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Disable actions on Codeberg mirror
|
||||||
|
|
||||||
|
```sh
|
||||||
|
EVERY_CHANNEL_FORGE_HOST=https://codeberg.org \
|
||||||
|
EVERY_CHANNEL_FORGE_REPO=every-channel/every.channel \
|
||||||
|
EVERY_CHANNEL_FORGE_ACTIONS_ENABLED=false \
|
||||||
|
./scripts/forge-set-repo-actions.sh
|
||||||
|
```
|
||||||
102
docs/NUC_UNIFI_NETBOOT.md
Normal file
102
docs/NUC_UNIFI_NETBOOT.md
Normal file
|
|
@@ -0,0 +1,102 @@
|
||||||
|
# NUC Fleet Netboot (Unifi + ProxyDHCP)
|
||||||
|
|
||||||
|
This runbook provisions x86_64 NUCs from runner netboot artifacts without USB image flashing.
|
||||||
|
|
||||||
|
It uses:
|
||||||
|
|
||||||
|
- Unifi DHCP for IP leases.
|
||||||
|
- Local `dnsmasq` ProxyDHCP for PXE/iPXE bootfile logic.
|
||||||
|
- Local HTTP + TFTP service for boot artifacts.
|
||||||
|
|
||||||
|
## Why ProxyDHCP
|
||||||
|
|
||||||
|
iPXE commonly needs two boot stages:
|
||||||
|
|
||||||
|
1. firmware PXE -> `ipxe.efi`
|
||||||
|
2. iPXE -> `netboot.ipxe`
|
||||||
|
|
||||||
|
If DHCP always returns `ipxe.efi`, clients can loop forever. ProxyDHCP handles stage-specific boot responses cleanly while leaving Unifi as the DHCP lease server.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- A Linux boot server on the same VLAN/L2 domain as the NUCs.
|
||||||
|
- Unifi network with normal DHCP enabled.
|
||||||
|
- Local DNS record on that VLAN: `boot.every.channel -> <boot-server-ip>`.
|
||||||
|
- `curl`, `tar`, `python3`, `dnsmasq` installed on the boot server.
|
||||||
|
- Runner netboot artifact already published to Forgejo Releases (or available as a local tarball).
|
||||||
|
|
||||||
|
## 1) Stage artifacts
|
||||||
|
|
||||||
|
From repository root on the boot server:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./scripts/netboot-stage.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Optional inputs:
|
||||||
|
|
||||||
|
- `EVERY_CHANNEL_NETBOOT_RELEASE_TAG=boot-v2026.02.28`
|
||||||
|
- `EVERY_CHANNEL_NETBOOT_TARBALL=/path/to/ec-runner-x86_64-netboot-....tar.gz`
|
||||||
|
- `EVERY_CHANNEL_FORGE_TOKEN=<token>` for private releases
|
||||||
|
- `EVERY_CHANNEL_NETBOOT_HOSTNAME=boot.every.channel`
|
||||||
|
|
||||||
|
This stages:
|
||||||
|
|
||||||
|
- `tmp/netboot/http/{kernel,initrd,netboot.ipxe}`
|
||||||
|
- `tmp/netboot/tftp/ipxe.efi`
|
||||||
|
|
||||||
|
## 2) Serve HTTP + TFTP + ProxyDHCP
|
||||||
|
|
||||||
|
Example (replace values for your VLAN):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo \
|
||||||
|
EVERY_CHANNEL_NETBOOT_LISTEN_IP=10.20.30.2 \
|
||||||
|
EVERY_CHANNEL_NETBOOT_INTERFACE=eth0 \
|
||||||
|
EVERY_CHANNEL_NETBOOT_PROXY_SUBNET=10.20.30.0/24 \
|
||||||
|
EVERY_CHANNEL_NETBOOT_HOSTNAME=boot.every.channel \
|
||||||
|
./scripts/netboot-serve.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
|
||||||
|
- Keep this process running during provisioning.
|
||||||
|
- Do not set Unifi DHCP bootfile options while this proxy mode is active.
|
||||||
|
- Ensure `boot.every.channel` resolves to the boot server IP from NUC clients.
|
||||||
|
|
||||||
|
## 3) Unifi / NUC settings
|
||||||
|
|
||||||
|
Unifi:
|
||||||
|
|
||||||
|
- Keep DHCP enabled for the provisioning VLAN.
|
||||||
|
- Leave DHCP boot/TFTP overrides unset when using `netboot-serve.sh`.
|
||||||
|
- Create/verify local DNS host override: `boot.every.channel -> <boot-server-ip>`.
|
||||||
|
|
||||||
|
NUC BIOS:
|
||||||
|
|
||||||
|
- Enable UEFI network boot (IPv4 PXE).
|
||||||
|
- Disable Legacy/CSM if possible.
|
||||||
|
- Put network boot before disk for first install cycle.
|
||||||
|
|
||||||
|
## 4) Provision the fleet
|
||||||
|
|
||||||
|
1. Boot each NUC on the provisioning VLAN.
|
||||||
|
2. PXE will chainload into iPXE and then runner `netboot.ipxe`.
|
||||||
|
3. Complete install/bootstrap flow on each node.
|
||||||
|
4. After successful install, switch boot order back to local disk.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
- Symptom: iPXE loop (`ipxe.efi` repeatedly)
|
||||||
|
- Cause: static DHCP bootfile without iPXE-aware logic.
|
||||||
|
- Fix: use ProxyDHCP flow (`netboot-serve.sh`) or set conditional DHCP rules.
|
||||||
|
- Symptom: NUC gets IP but never downloads boot artifacts
|
||||||
|
- Verify firewall allows UDP 67/68, UDP 69, and TCP 8080 between NUCs and boot server.
|
||||||
|
- Symptom: no `dnsmasq` offers seen
|
||||||
|
- Verify `EVERY_CHANNEL_NETBOOT_INTERFACE` and `EVERY_CHANNEL_NETBOOT_PROXY_SUBNET`.
|
||||||
|
|
||||||
|
## Security / networking
|
||||||
|
|
||||||
|
- Tailscale is not required for provisioning.
|
||||||
|
- Keep the provisioning VLAN isolated from regular clients.
|
||||||
|
- Stop `netboot-serve.sh` when rollout is complete.
|
||||||
|
|
@@ -50,6 +50,36 @@ Build an aarch64 SD image:
|
||||||
nix build .#nixosConfigurations.ec-runner-aarch64-sdimage.config.system.build.sdImage
|
nix build .#nixosConfigurations.ec-runner-aarch64-sdimage.config.system.build.sdImage
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## CI Deploy (Forgejo Releases)
|
||||||
|
|
||||||
|
Boot images can be built and published from CI via:
|
||||||
|
|
||||||
|
- `.forgejo/workflows/deploy-runner-images.yml`
|
||||||
|
|
||||||
|
Triggers:
|
||||||
|
|
||||||
|
- Manual: `workflow_dispatch`
|
||||||
|
- Tags: `boot-v*` (for example `boot-v2026.02.28`)
|
||||||
|
|
||||||
|
Manual inputs (all optional):
|
||||||
|
|
||||||
|
- `release_tag` (defaults to `boot-<short-sha>`)
|
||||||
|
- `publish_release` (`true`/`false`, default `true`)
|
||||||
|
- `build_x86_64_netboot` (`true`/`false`, default `true`)
|
||||||
|
- `build_x86_64_iso` (`true`/`false`, default `true`)
|
||||||
|
|
||||||
|
Published assets are attached to the resolved Forgejo release tag and include:
|
||||||
|
|
||||||
|
- x86_64 netboot bundle (`kernel`, `initrd`, `netboot.ipxe`) as `.tar.gz`
|
||||||
|
- x86_64 installer `.iso`
|
||||||
|
- `SHA256SUMS.txt`
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
|
||||||
|
- CI image publish is disabled on the Codeberg mirror host.
|
||||||
|
- Current CI scope is x86_64 targets; aarch64 image builds remain local/manual unless an aarch64-capable runner is added.
|
||||||
|
- For multi-NUC PXE rollout on Unifi networks, use `docs/NUC_UNIFI_NETBOOT.md`.
|
||||||
|
|
||||||
## Outputs
|
## Outputs
|
||||||
|
|
||||||
After building, artifacts will be in `./result` (a symlink into the Nix store).
|
After building, artifacts will be in `./result` (a symlink into the Nix store).
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
||||||
# ECP-0063: Cloudflare MoQ Relay + WebTransport-Only Web Watch
|
# ECP-0063: Cloudflare MoQ Relay + WebTransport-Only Web Watch
|
||||||
|
|
||||||
Status: Draft
|
Status: Implemented
|
||||||
|
|
||||||
## Decision
|
## Decision
|
||||||
|
|
||||||
|
|
@@ -77,6 +77,11 @@ Implementation choice:
|
||||||
Web share link:
|
Web share link:
|
||||||
- `https://every.channel/watch?url=<relay-url>&name=<broadcast-name>`
|
- `https://every.channel/watch?url=<relay-url>&name=<broadcast-name>`
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep the legacy WebRTC/WS path as primary. Rejected because it does not align with relay-native MoQ fanout goals.
|
||||||
|
- Wait for full draft parity across all relays before shipping. Rejected because live interop was already sufficient on the chosen relay path.
|
||||||
|
|
||||||
## Rollout / Reversibility
|
## Rollout / Reversibility
|
||||||
|
|
||||||
- Keep existing `/api/*` bootstrap endpoints during migration.
|
- Keep existing `/api/*` bootstrap endpoints during migration.
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
||||||
# ECP-0064: NixOS Module For `ec-node` WebTransport Publisher (Tower)
|
# ECP-0064: NixOS Module For `ec-node` WebTransport Publisher (Tower)
|
||||||
|
|
||||||
Status: Draft
|
Status: Implemented
|
||||||
|
|
||||||
## Decision
|
## Decision
|
||||||
|
|
||||||
|
|
@@ -41,8 +41,12 @@ Out of scope (defer):
|
||||||
- Automatic lineup-based channel selection by callsign.
|
- Automatic lineup-based channel selection by callsign.
|
||||||
- Secrets management (publisher doesn't require secrets for Cloudflare relay preview).
|
- Secrets management (publisher doesn't require secrets for Cloudflare relay preview).
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Continue running publishers manually via shells/tmux. Rejected because it is not reproducible or restart-safe.
|
||||||
|
- Build a separate external deployment repo first. Rejected because this delays in-repo infrastructure ownership.
|
||||||
|
|
||||||
## Rollout / Reversibility
|
## Rollout / Reversibility
|
||||||
|
|
||||||
- Enabling the module is per-host.
|
- Enabling the module is per-host.
|
||||||
- Reversible by removing the module import and disabling the service(s); roll back with the existing deployment tooling.
|
- Reversible by removing the module import and disabling the service(s); roll back with the existing deployment tooling.
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
||||||
# ECP-0065: NixOS Runner Images + Netboot Artifacts
|
# ECP-0065: NixOS Runner Images + Netboot Artifacts
|
||||||
|
|
||||||
Status: Draft
|
Status: Implemented
|
||||||
|
|
||||||
## Decision
|
## Decision
|
||||||
|
|
||||||
|
|
@@ -40,6 +40,11 @@ Out of scope (defer):
|
||||||
- Remote runtime provisioning (fetching per-node channel lists).
|
- Remote runtime provisioning (fetching per-node channel lists).
|
||||||
- Hardware-accelerated transcode changes (keep current CPU x264 baseline).
|
- Hardware-accelerated transcode changes (keep current CPU x264 baseline).
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep runner images out-of-repo and publish ad hoc artifacts. Rejected because it weakens reproducibility and provenance.
|
||||||
|
- Restrict to one install path only (disk install only). Rejected because netboot/bootstrap is required for fleet recovery.
|
||||||
|
|
||||||
## Rollout / Reversibility
|
## Rollout / Reversibility
|
||||||
|
|
||||||
- Rollout begins with local builds and a single test machine.
|
- Rollout begins with local builds and a single test machine.
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
||||||
# ECP-0066: iroh-Gossip Control Protocol For Hybrid MoQ Discovery
|
# ECP-0066: iroh-Gossip Control Protocol For Hybrid MoQ Discovery
|
||||||
|
|
||||||
Status: Draft
|
Status: Implemented
|
||||||
|
|
||||||
## Decision
|
## Decision
|
||||||
|
|
||||||
|
|
@@ -39,6 +39,11 @@ Out of scope:
|
||||||
- Security policy beyond existing iroh/gossip trust boundaries.
|
- Security policy beyond existing iroh/gossip trust boundaries.
|
||||||
- Replacing existing catalog gossip immediately (coexist first).
|
- Replacing existing catalog gossip immediately (coexist first).
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep relay and direct discovery completely separate. Rejected because it forces duplicated consumer logic.
|
||||||
|
- Replace existing catalog gossip in one cutover. Rejected because additive coexistence is safer for rollout.
|
||||||
|
|
||||||
## Rollout / Reversibility
|
## Rollout / Reversibility
|
||||||
|
|
||||||
- Additive and reversible: removing control commands and topic does not affect existing media paths.
|
- Additive and reversible: removing control commands and topic does not affect existing media paths.
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
||||||
# ECP-0067: Control Transport Resolution And NixOS Control Wiring
|
# ECP-0067: Control Transport Resolution And NixOS Control Wiring
|
||||||
|
|
||||||
Status: Draft
|
Status: Implemented
|
||||||
|
|
||||||
## Decision
|
## Decision
|
||||||
|
|
||||||
|
|
@@ -32,6 +32,11 @@ Out of scope:
|
||||||
- End-to-end automatic failover execution (resolve + launch subscribe) in one command.
|
- End-to-end automatic failover execution (resolve + launch subscribe) in one command.
|
||||||
- Cryptographic policy hardening beyond current control-topic trust model.
|
- Cryptographic policy hardening beyond current control-topic trust model.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep transport selection in ad hoc shell logic. Rejected because policy behavior becomes inconsistent across operators.
|
||||||
|
- Wire control flags per host manually. Rejected because it is error-prone and not declarative.
|
||||||
|
|
||||||
## Rollout / Reversibility
|
## Rollout / Reversibility
|
||||||
|
|
||||||
- Additive only: existing relay and direct publish/subscribe paths remain unchanged.
|
- Additive only: existing relay and direct publish/subscribe paths remain unchanged.
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
||||||
# ECP-0068: Iroh Control To Web Directory Bridge
|
# ECP-0068: Iroh Control To Web Directory Bridge
|
||||||
|
|
||||||
Status: Draft
|
Status: Implemented
|
||||||
|
|
||||||
## Decision
|
## Decision
|
||||||
|
|
||||||
|
|
@@ -34,6 +34,11 @@ Out of scope:
|
||||||
- Signed/authenticated control announcements.
|
- Signed/authenticated control announcements.
|
||||||
- Replacing relay playback with direct iroh in browsers.
|
- Replacing relay playback with direct iroh in browsers.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep manual stream naming/link entry on the website. Rejected because it blocks one-click discovery.
|
||||||
|
- Bridge directly from browser clients instead of a node command. Rejected because browser trust/availability constraints are higher.
|
||||||
|
|
||||||
## Rollout / Reversibility
|
## Rollout / Reversibility
|
||||||
|
|
||||||
- Additive change; existing `/api/directory` and watch-by-link behavior remain intact.
|
- Additive change; existing `/api/directory` and watch-by-link behavior remain intact.
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
||||||
# ECP-0069: NixOS Control Bridge Auto-Bootstrap
|
# ECP-0069: NixOS Control Bridge Auto-Bootstrap
|
||||||
|
|
||||||
Status: Draft
|
Status: Implemented
|
||||||
|
|
||||||
## Decision
|
## Decision
|
||||||
|
|
||||||
|
|
@@ -31,6 +31,11 @@ Out of scope:
|
||||||
- Signed control announcements.
|
- Signed control announcements.
|
||||||
- Browser-native iroh direct transport playback.
|
- Browser-native iroh direct transport playback.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Continue manual gossip peer bootstrapping for the bridge. Rejected because restarts/reboots cause repeated operational toil.
|
||||||
|
- Use static peer lists only. Rejected because local publisher sets are dynamic and should be discovered from runtime endpoint files.
|
||||||
|
|
||||||
## Rollout / Reversibility
|
## Rollout / Reversibility
|
||||||
|
|
||||||
- Additive: existing publisher behavior is unchanged when `control.bridgeWeb.enable = false`.
|
- Additive: existing publisher behavior is unchanged when `control.bridgeWeb.enable = false`.
|
||||||
|
|
|
||||||
|
|
@@ -1,5 +1,7 @@
|
||||||
# ECP-0070: Relay-Native CAS Archival + NixOS Auto-Archive Service
|
# ECP-0070: Relay-Native CAS Archival + NixOS Auto-Archive Service
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
## Summary
|
## Summary
|
||||||
|
|
||||||
Add a first-party archival path for MoQ relay streams:
|
Add a first-party archival path for MoQ relay streams:
|
||||||
|
|
@@ -48,6 +50,11 @@ Tradeoffs:
|
||||||
- Discovery source is the web public stream list (not full control-topic gossip ingestion).
|
- Discovery source is the web public stream list (not full control-topic gossip ingestion).
|
||||||
- Per-broadcast workers are process-based and best-effort supervised.
|
- Per-broadcast workers are process-based and best-effort supervised.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Rely on browser-side replay caches only. Rejected because it does not provide durable archival storage.
|
||||||
|
- Archive only manifests without CAS payloads. Rejected because replay/integrity requires retained object bytes.
|
||||||
|
|
||||||
## Rollout
|
## Rollout
|
||||||
|
|
||||||
1. Ship `wt-archive` command in `ec-node`.
|
1. Ship `wt-archive` command in `ec-node`.
|
||||||
|
|
|
||||||
|
|
@@ -1,5 +1,7 @@
|
||||||
# ECP-0071: Archive Replay DVR Endpoints
|
# ECP-0071: Archive Replay DVR Endpoints
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
ECP-0070 added relay archival (`wt-archive`) into CAS objects plus JSONL indexes, but there is no read path for viewers to scrub historical content.
|
ECP-0070 added relay archival (`wt-archive`) into CAS objects plus JSONL indexes, but there is no read path for viewers to scrub historical content.
|
||||||
|
|
@@ -26,6 +28,16 @@ Add an archive replay path with these pieces:
|
||||||
- Preserves CAS as source of truth; playlists are derived views.
|
- Preserves CAS as source of truth; playlists are derived views.
|
||||||
- Uses standard HLS+DVR semantics so browser playback + scrubbing works without custom protocol work in the short term.
|
- Uses standard HLS+DVR semantics so browser playback + scrubbing works without custom protocol work in the short term.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Build a custom replay protocol/UI instead of HLS. Rejected because browser DVR support is stronger with standard HLS tooling.
|
||||||
|
- Serve archive from a separate domain only. Rejected because same-domain replay keeps watch links and CORS simpler.
|
||||||
|
|
||||||
|
## Rollout / teardown
|
||||||
|
|
||||||
|
- Enable archive serve mode on archive hosts and deploy worker proxy routing to `/api/archive/*`.
|
||||||
|
- Teardown by disabling `archive.serve.enable` and removing proxy routing.
|
||||||
|
|
||||||
## Reversibility
|
## Reversibility
|
||||||
|
|
||||||
- Disable `archive.serve.enable` and remove worker proxy route to revert to archive-only mode.
|
- Disable `archive.serve.enable` and remove worker proxy route to revert to archive-only mode.
|
||||||
|
|
|
||||||
|
|
@@ -1,5 +1,7 @@
|
||||||
# ECP-0072: CMAF Seedbox Invariant For Relay Archive
|
# ECP-0072: CMAF Seedbox Invariant For Relay Archive
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
Archive replay currently stores and serves relay groups exactly as received, but many existing broadcasts were published in `legacy` container mode. Those bytes are not browser-HLS compatible, so archive playback fails despite a valid timeline and object store.
|
Archive replay currently stores and serves relay groups exactly as received, but many existing broadcasts were published in `legacy` container mode. Those bytes are not browser-HLS compatible, so archive playback fails despite a valid timeline and object store.
|
||||||
|
|
@@ -20,6 +22,16 @@ Update the NixOS module default `services.every-channel.ec-node.passthrough = tr
|
||||||
- Exact-byte retention avoids drift between live and replay.
|
- Exact-byte retention avoids drift between live and replay.
|
||||||
- Browsers can play CMAF fragments via standard HLS tooling; no custom legacy converter is required for new streams.
|
- Browsers can play CMAF fragments via standard HLS tooling; no custom legacy converter is required for new streams.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep `passthrough=false` as default for all publishers. Rejected because archive replay needs byte-compatible CMAF fragments.
|
||||||
|
- Re-encode archived payloads during replay. Rejected because it adds complexity and breaks exact-byte history semantics.
|
||||||
|
|
||||||
|
## Rollout / teardown
|
||||||
|
|
||||||
|
- Flip default `passthrough` to true in CLI and Nix module, then verify new archives play via HLS.
|
||||||
|
- Teardown by explicitly setting `passthrough=false` on hosts needing legacy framing.
|
||||||
|
|
||||||
## Reversibility
|
## Reversibility
|
||||||
|
|
||||||
- Operators can explicitly set `passthrough = false` per host to revert to legacy framing.
|
- Operators can explicitly set `passthrough = false` per host to revert to legacy framing.
|
||||||
|
|
|
||||||
|
|
@@ -1,5 +1,7 @@
|
||||||
# ECP-0073: Archive Relay Affinity Override
|
# ECP-0073: Archive Relay Affinity Override
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
`wt-archive` workers discover streams from `/api/public-streams` and subscribe to the listed `relay_url`. In practice, `cdn.moq.dev` resolves to region-local relay IPs, and broadcasts published from one region are not consistently visible from another region endpoint.
|
`wt-archive` workers discover streams from `/api/public-streams` and subscribe to the listed `relay_url`. In practice, `cdn.moq.dev` resolves to region-local relay IPs, and broadcasts published from one region are not consistently visible from another region endpoint.
|
||||||
|
|
@@ -22,6 +24,11 @@ This allows operators to pin archive ingestion to the same relay endpoint used b
|
||||||
- Keeps deployment-level control in Nix (no app-level migration needed).
|
- Keeps deployment-level control in Nix (no app-level migration needed).
|
||||||
- Reversible with a single config change.
|
- Reversible with a single config change.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep subscribing to directory-provided `relay_url` only. Rejected because cross-region visibility is inconsistent in practice.
|
||||||
|
- Rewrite directory entries per-region. Rejected because this mixes deployment affinity into public directory payloads.
|
||||||
|
|
||||||
## Rollout
|
## Rollout
|
||||||
|
|
||||||
1. Set `archive.relayUrlOverride` on archive hosts that need relay affinity.
|
1. Set `archive.relayUrlOverride` on archive hosts that need relay affinity.
|
||||||
|
|
|
||||||
|
|
@@ -1,5 +1,7 @@
|
||||||
# ECP-0074: Archive HLS Engine Selection For Chromium
|
# ECP-0074: Archive HLS Engine Selection For Chromium
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
Archive mode currently chooses native HLS whenever `video.canPlayType("application/vnd.apple.mpegurl")` is non-empty.
|
Archive mode currently chooses native HLS whenever `video.canPlayType("application/vnd.apple.mpegurl")` is non-empty.
|
||||||
|
|
@@ -16,6 +18,16 @@ Use native HLS only on Safari/iOS user agents. For all other browsers (including
|
||||||
- Keeps Safari native path where it is reliable.
|
- Keeps Safari native path where it is reliable.
|
||||||
- Preserves a single URL and UI flow (`/api/archive/.../master.m3u8`).
|
- Preserves a single URL and UI flow (`/api/archive/.../master.m3u8`).
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep `canPlayType` as the only gate. Rejected because Chromium reports support but fails event-style playback.
|
||||||
|
- Force `hls.js` for all browsers including Safari. Rejected because Safari native playback is already reliable and simpler.
|
||||||
|
|
||||||
|
## Rollout / teardown
|
||||||
|
|
||||||
|
- Deploy UA-gated engine selection in web app and validate archive playback on Chromium and Safari.
|
||||||
|
- Teardown by reverting to the previous generic `canPlayType` gate.
|
||||||
|
|
||||||
## Reversibility
|
## Reversibility
|
||||||
|
|
||||||
Revert the UA gate and return to the previous `canPlayType`-only check.
|
Revert the UA gate and return to the previous `canPlayType`-only check.
|
||||||
|
|
|
||||||
|
|
@@ -1,5 +1,7 @@
|
||||||
# ECP-0075: Bump Web Watcher To `@moq/watch@0.2.0`
|
# ECP-0075: Bump Web Watcher To `@moq/watch@0.2.0`
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
Production web watchers currently load `@moq/watch@0.1.1`. Under live OTA relay streams, Chromium sessions frequently emit runtime failures (`VideoFrame clone` errors and repeated stream resets), leaving playback stalled even after successful subscribe.
|
Production web watchers currently load `@moq/watch@0.1.1`. Under live OTA relay streams, Chromium sessions frequently emit runtime failures (`VideoFrame clone` errors and repeated stream resets), leaving playback stalled even after successful subscribe.
|
||||||
|
|
@@ -15,6 +17,16 @@ Set both `name` and `path` attributes on `<moq-watch>` so minor-version attribut
|
||||||
- Pulls in upstream runtime fixes without introducing new local playback logic.
|
- Pulls in upstream runtime fixes without introducing new local playback logic.
|
||||||
- Preserves multi-CDN fallback behavior already used for dependency resilience.
|
- Preserves multi-CDN fallback behavior already used for dependency resilience.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep pin at `0.1.1` and add larger local workarounds. Rejected because upstream fixes already address core runtime failures.
|
||||||
|
- Switch to a different browser player stack immediately. Rejected because this is higher risk than a targeted minor-version bump.
|
||||||
|
|
||||||
|
## Rollout / teardown
|
||||||
|
|
||||||
|
- Roll out `@moq/watch@0.2.0` on all CDN import fallbacks and verify live subscribe/playback.
|
||||||
|
- Teardown by repinning imports to `0.1.1`.
|
||||||
|
|
||||||
## Reversibility
|
## Reversibility
|
||||||
|
|
||||||
- Roll back by pinning imports back to `0.1.1` if regressions appear.
|
- Roll back by pinning imports back to `0.1.1` if regressions appear.
|
||||||
|
|
|
||||||
|
|
@@ -1,5 +1,7 @@
|
||||||
# ECP-0076: WebTransport-Only Browser Watcher Path
|
# ECP-0076: WebTransport-Only Browser Watcher Path
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
The browser watcher (`@moq/watch`) races WebTransport against WebSocket fallback by default. In production relay sessions this fallback path correlates with degraded playback behavior (frequent stream resets and unreliable audio despite active subscription).
|
The browser watcher (`@moq/watch`) races WebTransport against WebSocket fallback by default. In production relay sessions this fallback path correlates with degraded playback behavior (frequent stream resets and unreliable audio despite active subscription).
|
||||||
|
|
@@ -10,7 +12,7 @@ In `apps/web/app.js`, configure each `<moq-watch>` instance to disable WebSocket
|
||||||
|
|
||||||
- `watch.connection.websocket = { enabled: false }`
|
- `watch.connection.websocket = { enabled: false }`
|
||||||
|
|
||||||
Also set default watcher volume to full (`volume="1"`) and mount live playback on a `<video>` element (with controls) inside `<moq-watch>` so browser audio policies and controls apply predictably. On mount, force audio signals to `muted=false` and `volume=1`.
|
Also set default watcher volume to full (`volume="1"`). Keep canvas live rendering, and on mount force audio signals to `muted=false` and `volume=1`.
|
||||||
|
|
||||||
## Rationale
|
## Rationale
|
||||||
|
|
||||||
|
|
@@ -18,6 +20,16 @@ Also set default watcher volume to full (`volume="1"`) and mount live playback o
|
||||||
- Removes fallback-induced variability from live playback behavior.
|
- Removes fallback-induced variability from live playback behavior.
|
||||||
- Keeps implementation local to web app wiring without forking upstream packages.
|
- Keeps implementation local to web app wiring without forking upstream packages.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Leave WebSocket fallback enabled. Rejected because fallback races correlated with unstable live playback.
|
||||||
|
- Fork upstream watcher package for a custom transport stack. Rejected because app-level wiring changes were sufficient.
|
||||||
|
|
||||||
|
## Rollout / teardown
|
||||||
|
|
||||||
|
- Deploy connection override to disable websocket fallback and validate live session stability.
|
||||||
|
- Teardown by removing the override and restoring default transport behavior.
|
||||||
|
|
||||||
## Reversibility
|
## Reversibility
|
||||||
|
|
||||||
- Remove the connection override to restore default fallback behavior.
|
- Remove the connection override to restore default fallback behavior.
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,39 @@
|
||||||
|
# ECP-0077: Explicit AAC-LC Live Audio Profile In `wt-publish`
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Live OTA inputs expose multiple AC-3 audio tracks (5.1 + stereo language variants). Browser watcher behavior is more stable when the published relay stream has a single explicit AAC-LC stereo track shape.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
In `ec-node wt-publish` transcode mode, force explicit stream mapping and AAC profile:
|
||||||
|
|
||||||
|
- `-map 0:v:0`
|
||||||
|
- `-map 0:a:0?`
|
||||||
|
- `-c:a aac`
|
||||||
|
- `-profile:a aac_low`
|
||||||
|
- `-b:a 160k`
|
||||||
|
- `-ac 2`
|
||||||
|
- `-ar 48000`
|
||||||
|
|
||||||
|
## Rationale
|
||||||
|
|
||||||
|
- Removes ambiguity from ffmpeg auto stream selection when multiple audio tracks exist.
|
||||||
|
- Keeps audio encoding browser-friendly and deterministic.
|
||||||
|
- Preserves optional audio behavior (`0:a:0?`) for edge cases where input temporarily lacks audio.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep ffmpeg auto stream selection/profile defaults. Rejected because multi-track OTA inputs produced unstable browser outcomes.
|
||||||
|
- Preserve AC-3 passthrough for all sources. Rejected because browser compatibility is weaker than explicit AAC-LC stereo.
|
||||||
|
|
||||||
|
## Rollout / teardown
|
||||||
|
|
||||||
|
- Enable explicit audio mapping/profile in `wt-publish` transcode mode and verify browser playback across OTA sources.
|
||||||
|
- Teardown by removing explicit `-map` and AAC profile options.
|
||||||
|
|
||||||
|
## Reversibility
|
||||||
|
|
||||||
|
- Revert to ffmpeg auto mapping/profile by removing explicit `-map` and `-profile:a` flags.
|
||||||
|
|
@ -0,0 +1,36 @@
|
||||||
|
# ECP-0078: Live `<video>`-First Rendering With Gesture Audio Unlock
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Live browser playback currently prioritizes canvas rendering. Audio can fail on first load due to autoplay policy (`AudioContext was not allowed to start`) and we still need a robust `<video>` rendering path for native controls.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
In the web watcher mount path:
|
||||||
|
|
||||||
|
1. Render live playback with a `<video>` child in `<moq-watch>` first.
|
||||||
|
2. Start muted at the watcher signal layer for autoplay compatibility, then unlock audio on first user gesture by:
|
||||||
|
- forcing backend `muted=false`, `volume=1`,
|
||||||
|
- toggling paused state to trigger resume,
|
||||||
|
- unmuting the `<video>` element.
|
||||||
|
|
||||||
|
## Rationale
|
||||||
|
|
||||||
|
- Preserves the `<video>` UX target while handling browser autoplay constraints explicitly.
|
||||||
|
- Keeps changes local to app wiring without forking upstream MoQ player internals.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep canvas-first rendering only. Rejected because native `<video>` controls/audio handling are still required.
|
||||||
|
- Attempt autoplay with unmuted audio by default. Rejected because browser policy blocks reliable first-play behavior.
|
||||||
|
|
||||||
|
## Rollout / teardown
|
||||||
|
|
||||||
|
- Deploy muted-start plus gesture unlock wiring and validate first-load playback and unmute behavior.
|
||||||
|
- Teardown by removing unlock wiring or reverting to prior renderer mode.
|
||||||
|
|
||||||
|
## Reversibility
|
||||||
|
|
||||||
|
- Remove the unlock wiring (or return to canvas renderer) to restore prior behavior.
|
||||||
|
|
@ -0,0 +1,45 @@
|
||||||
|
# ECP-0079: Governance Hygiene, CI Quality Gates, and Main-Branch Protection
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Recent delivery velocity improved product behavior, but governance and quality signals drifted:
|
||||||
|
|
||||||
|
- active ECPs were not consistently marked with explicit status and alternatives;
|
||||||
|
- pull requests lacked a single, explicit CI gate for core tests plus web build;
|
||||||
|
- deploy could proceed without an explicit prerequisite check job;
|
||||||
|
- branch protection settings were not codified as an operator runbook artifact.
|
||||||
|
|
||||||
|
This conflicts with the constitutional requirement that non-trivial changes remain reviewable and merge through pull requests.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
1. Normalize governance records for the active proposal window (`ECP-0063` through `ECP-0078`):
|
||||||
|
- mark implemented decisions as `Status: Implemented`,
|
||||||
|
- add explicit `Alternatives considered` sections,
|
||||||
|
- ensure rollout/teardown intent is present.
|
||||||
|
2. Add `scripts/ecp-lint.sh` and run it in CI to enforce required ECP sections for active proposals.
|
||||||
|
3. Add a `ci-gates` workflow for pull requests that runs:
|
||||||
|
- ECP lint,
|
||||||
|
- core Rust test subset,
|
||||||
|
- `apps/web` production build.
|
||||||
|
4. Update deploy workflow to include a dedicated `checks` job and make deploy depend on that job.
|
||||||
|
5. Correct Cloudflare deploy docs so manual commands and secret prerequisites match current implementation.
|
||||||
|
6. Add a branch-protection enforcement script and runbook so `main` can be locked to PR merges with required checks.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep governance cleanup manual and ad hoc. Rejected because drift reappears quickly under fast iteration.
|
||||||
|
- Gate only deploy, not pull requests. Rejected because review-time feedback is required before merge.
|
||||||
|
- Rely on UI-only branch protection configuration with no repo script/runbook. Rejected because settings become opaque and harder to audit.
|
||||||
|
|
||||||
|
## Rollout / teardown plan
|
||||||
|
|
||||||
|
- Rollout:
|
||||||
|
- land ECP updates + lint script + CI workflows + docs + branch-protection tooling together;
|
||||||
|
- apply branch protection using the new script;
|
||||||
|
- set required check context to `ci-gates / checks`.
|
||||||
|
- Teardown:
|
||||||
|
- remove `ci-gates` workflow and lint script if governance process is superseded;
|
||||||
|
- relax branch protection via API/script and adjust constitutional process in a superseding ECP.
|
||||||
|
|
@ -0,0 +1,36 @@
|
||||||
|
# ECP-0080: Forgejo-Primary Git Hosting With Mirror-Only Codeberg/GitHub
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Repository governance and CI are currently coupled to Codeberg-specific defaults in scripts and workflows. We want Forgejo to be the primary host for development and automation, while keeping Codeberg and GitHub as mirror endpoints only.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
1. Adopt Forgejo as the primary git/actions host.
|
||||||
|
2. Treat Codeberg and GitHub as push mirrors.
|
||||||
|
3. Disable actions on Codeberg mirror repositories.
|
||||||
|
4. Make workflow API calls host-agnostic by using runtime server/repository context instead of hardcoded Codeberg URLs.
|
||||||
|
5. Add first-party scripts for:
|
||||||
|
- setting primary + mirror remotes,
|
||||||
|
- pushing mirrors,
|
||||||
|
- toggling repository actions via Forgejo/Gitea API.
|
||||||
|
6. Update Forge scripts/docs so host/token defaults point to Forgejo first, with compatibility fallbacks for existing `codeberg-token.age`.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep Codeberg as the primary host and only tune runners. Rejected because the operational target is Forgejo-first ownership.
|
||||||
|
- Maintain duplicate active CI on all hosts. Rejected because it doubles cost/noise and risks divergent automation behavior.
|
||||||
|
- Manually manage remotes and repo settings per developer. Rejected because migration drift is likely without repeatable scripts.
|
||||||
|
|
||||||
|
## Rollout / teardown plan
|
||||||
|
|
||||||
|
- Rollout:
|
||||||
|
- merge workflow/script/doc updates,
|
||||||
|
- configure local remotes to Forgejo primary + mirror remotes,
|
||||||
|
- disable Codeberg actions with the new repository toggle script.
|
||||||
|
- Teardown:
|
||||||
|
- re-enable actions on Codeberg via the same toggle script,
|
||||||
|
- point primary remote back to Codeberg if needed,
|
||||||
|
- revert workflow host-context changes if a single-host lock-in is required.
|
||||||
35
evolution/proposals/ECP-0081-ci-boot-image-deploy.md
Normal file
35
evolution/proposals/ECP-0081-ci-boot-image-deploy.md
Normal file
|
|
@ -0,0 +1,35 @@
|
||||||
|
# ECP-0081: CI Boot Image Deployment to Forgejo Releases
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Runner boot artifacts (netboot/ISO/SD) are currently built by hand from local Linux/Nix environments. That slows repeatable infra rollout and weakens traceability from commit to runnable images.
|
||||||
|
|
||||||
|
The constitution requires infrastructure definitions to live in-repo and stay independently operable. Boot image publication should follow the same Forgejo-primary CI model used for web deploys, while mirror hosts remain distribution-only.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
1. Add a dedicated Forgejo Actions workflow to build runner boot images from flake outputs in CI.
|
||||||
|
2. Trigger this workflow on:
|
||||||
|
- manual dispatch (`workflow_dispatch`), and
|
||||||
|
- release-style tags (`boot-v*`).
|
||||||
|
3. Publish built artifacts to Forgejo Releases on the primary host using repository-scoped API calls and the workflow token.
|
||||||
|
4. Exclude Codeberg mirror runs using the existing server guard (`github.server_url != 'https://codeberg.org'`).
|
||||||
|
5. Start with x86_64 image targets in CI (`netboot`, `iso`) to keep runtime/runner requirements explicit and reversible.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Keep boot image builds fully manual. Rejected because operator discipline alone does not provide consistent provenance.
|
||||||
|
- Push images to external object storage first. Rejected for now because Forgejo Releases are already part of the controlled primary platform.
|
||||||
|
- Build all architectures in one CI pass immediately. Rejected because runner architecture availability is not guaranteed and would make initial rollout brittle.
|
||||||
|
|
||||||
|
## Rollout / teardown plan
|
||||||
|
|
||||||
|
- Rollout:
|
||||||
|
- merge workflow + docs,
|
||||||
|
- run a manual dispatch to verify artifact publication,
|
||||||
|
- optionally create `boot-v*` tags for versioned image drops.
|
||||||
|
- Teardown:
|
||||||
|
- disable or delete the boot-image workflow,
|
||||||
|
- continue using local/manual `nix build` paths from `docs/RUNNER_IMAGES.md`.
|
||||||
36
evolution/proposals/ECP-0082-unifi-pxe-runner-rollout.md
Normal file
36
evolution/proposals/ECP-0082-unifi-pxe-runner-rollout.md
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
# ECP-0082: Unifi PXE Rollout Path for Runner Images
|
||||||
|
|
||||||
|
Status: Implemented
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Runner netboot artifacts now publish from CI, but there is no repository-native operating path for fleet provisioning on common prosumer networks (for example Unifi VLANs).
|
||||||
|
|
||||||
|
Unifi DHCP can expose next-server/bootfile settings, but iPXE chainloading often requires conditional bootfile behavior to avoid loops (`ipxe.efi` first stage, script second stage). Not all controller setups expose that cleanly.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
1. Add first-party scripts for local netboot staging and serving:
|
||||||
|
- stage x86_64 netboot artifacts from Forgejo Releases (or local tarball),
|
||||||
|
- stage iPXE UEFI binary for TFTP,
|
||||||
|
- run HTTP + TFTP + ProxyDHCP via `dnsmasq` for deterministic chainloading.
|
||||||
|
2. Keep Unifi DHCP as the IP authority; use ProxyDHCP only to supply bootfile logic.
|
||||||
|
3. Document a concrete NUC rollout sequence for same-VLAN provisioning.
|
||||||
|
4. Keep dependencies minimal (`curl`, `tar`, `python3`, `dnsmasq`) and avoid requiring image flashing workflows.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
- Require Unifi DHCP conditional iPXE rules. Rejected because controller capabilities vary and misconfiguration risks boot loops.
|
||||||
|
- Keep manual USB-only provisioning. Rejected because it increases labor for multi-node rollout.
|
||||||
|
- Add a heavy provisioning stack (MAAS/Foreman/Kickstart integration). Rejected as too much operational overhead for current scale.
|
||||||
|
|
||||||
|
## Rollout / teardown plan
|
||||||
|
|
||||||
|
- Rollout:
|
||||||
|
- merge scripts/docs,
|
||||||
|
- run `netboot-stage` on the boot server,
|
||||||
|
- run `netboot-serve` on the NUC VLAN and boot hosts via PXE.
|
||||||
|
- Teardown:
|
||||||
|
- stop `netboot-serve`,
|
||||||
|
- remove staged artifacts under `tmp/netboot`,
|
||||||
|
- continue with ISO+USB fallback path.
|
||||||
15
justfile
15
justfile
|
|
@ -3,6 +3,21 @@ set shell := ["bash", "-eu", "-o", "pipefail", "-c"]
|
||||||
default:
|
default:
|
||||||
@just --list
|
@just --list
|
||||||
|
|
||||||
|
ecp-lint:
|
||||||
|
./scripts/ecp-lint.sh
|
||||||
|
|
||||||
|
git-hosting:
|
||||||
|
./scripts/git-configure-hosting.sh
|
||||||
|
|
||||||
|
git-mirrors:
|
||||||
|
./scripts/git-push-mirrors.sh
|
||||||
|
|
||||||
|
netboot-stage:
|
||||||
|
./scripts/netboot-stage.sh
|
||||||
|
|
||||||
|
netboot-serve:
|
||||||
|
./scripts/netboot-serve.sh
|
||||||
|
|
||||||
test-core:
|
test-core:
|
||||||
cargo test -p ec-core -p ec-crypto -p ec-moq -p ec-iroh -p ec-linux-iptv
|
cargo test -p ec-core -p ec-crypto -p ec-moq -p ec-iroh -p ec-linux-iptv
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -5,7 +5,7 @@ root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
cd "${root}"
|
cd "${root}"
|
||||||
|
|
||||||
in_file="${1:-secrets/token.txt}"
|
in_file="${1:-secrets/token.txt}"
|
||||||
out_file="${2:-secrets/codeberg-token.age}"
|
out_file="${2:-secrets/forge-token.age}"
|
||||||
|
|
||||||
rules_file="${EVERY_CHANNEL_AGE_RULES_FILE:-${root}/secrets.nix}"
|
rules_file="${EVERY_CHANNEL_AGE_RULES_FILE:-${root}/secrets.nix}"
|
||||||
identity_file="${EVERY_CHANNEL_AGE_IDENTITY_FILE:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
identity_file="${EVERY_CHANNEL_AGE_IDENTITY_FILE:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
||||||
|
|
|
||||||
62
scripts/ecp-lint.sh
Executable file
62
scripts/ecp-lint.sh
Executable file
|
|
@ -0,0 +1,62 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
cd "${root}"
|
||||||
|
|
||||||
|
min_id="${EVERY_CHANNEL_ECP_LINT_MIN_ID:-63}"
|
||||||
|
|
||||||
|
files=()
|
||||||
|
if [[ "$#" -gt 0 ]]; then
|
||||||
|
for arg in "$@"; do
|
||||||
|
files+=("${arg}")
|
||||||
|
done
|
||||||
|
else
|
||||||
|
while IFS= read -r f; do
|
||||||
|
files+=("${f}")
|
||||||
|
done < <(find evolution/proposals -maxdepth 1 -type f -name 'ECP-*.md' | sort)
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${#files[@]}" -eq 0 ]]; then
|
||||||
|
echo "ecp-lint: no ECP files found"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
errors=0
|
||||||
|
|
||||||
|
check_pattern() {
|
||||||
|
local file="$1"
|
||||||
|
local regex="$2"
|
||||||
|
local message="$3"
|
||||||
|
if ! rg -q --pcre2 "${regex}" "${file}"; then
|
||||||
|
echo "ecp-lint: ${file}: ${message}" >&2
|
||||||
|
errors=$((errors + 1))
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
for file in "${files[@]}"; do
|
||||||
|
[[ -f "${file}" ]] || continue
|
||||||
|
|
||||||
|
base="$(basename "${file}")"
|
||||||
|
if [[ ! "${base}" =~ ^ECP-([0-9]{4})- ]]; then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
id=$((10#${BASH_REMATCH[1]}))
|
||||||
|
if (( id < min_id )); then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
check_pattern "${file}" '^# ECP-[0-9]{4}:' "missing or invalid title"
|
||||||
|
check_pattern "${file}" '^Status: (Draft|Accepted|Implemented|Superseded|Rejected)$' "missing or invalid Status line"
|
||||||
|
check_pattern "${file}" '^## (Problem|Context|Motivation)\b' "missing Problem/Context/Motivation section"
|
||||||
|
check_pattern "${file}" '^## Decision\b' "missing Decision section"
|
||||||
|
check_pattern "${file}" '^## (Alternatives considered|Alternatives)\b' "missing Alternatives considered section"
|
||||||
|
check_pattern "${file}" '^## (Rollout / teardown|Rollout / teardown plan|Rollout / Reversibility|Rollout|Reversibility)\b' "missing Rollout/teardown (or Reversibility) section"
|
||||||
|
done
|
||||||
|
|
||||||
|
if (( errors > 0 )); then
|
||||||
|
echo "ecp-lint: failed with ${errors} issue(s)" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "ecp-lint: ok"
|
||||||
|
|
@ -1,35 +1,6 @@
|
||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Back-compat shim. Prefer `scripts/fj-auth-forge.sh`.
|
||||||
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
cd "${root}"
|
exec "${root}/scripts/fj-auth-forge.sh"
|
||||||
|
|
||||||
# Forgejo CLI: `fj`
|
|
||||||
#
|
|
||||||
# Auth token source order:
|
|
||||||
# 1) CODEBERG_TOKEN env var
|
|
||||||
# 2) `agenix -d secrets/codeberg-token.age` (optional)
|
|
||||||
# 3) `age -d -i <identity> secrets/codeberg-token.age` (optional)
|
|
||||||
|
|
||||||
rules_file="${EVERY_CHANNEL_AGE_RULES_FILE:-./secrets.nix}"
|
|
||||||
identity_file="${EVERY_CHANNEL_AGE_IDENTITY_FILE:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
|
||||||
|
|
||||||
if [[ -z "${CODEBERG_TOKEN:-}" && -f secrets/codeberg-token.age ]]; then
|
|
||||||
if command -v agenix >/dev/null 2>&1; then
|
|
||||||
export CODEBERG_TOKEN
|
|
||||||
CODEBERG_TOKEN="$(RULES="${rules_file}" agenix -d secrets/codeberg-token.age -i "${identity_file}")"
|
|
||||||
elif command -v age >/dev/null 2>&1; then
|
|
||||||
export CODEBERG_TOKEN
|
|
||||||
CODEBERG_TOKEN="$(age -d -i "${identity_file}" secrets/codeberg-token.age)"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -z "${CODEBERG_TOKEN:-}" ]]; then
|
|
||||||
echo "error: CODEBERG_TOKEN is not set" >&2
|
|
||||||
echo "hint: set CODEBERG_TOKEN or create secrets/codeberg-token.age via agenix" >&2
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Avoid passing the token on the command line (shows up in process listings); use stdin.
|
|
||||||
printf "%s" "${CODEBERG_TOKEN}" | fj -H https://codeberg.org auth add-key every-channel
|
|
||||||
echo "fj configured. Try: fj -H https://codeberg.org whoami"
|
|
||||||
|
|
|
||||||
53
scripts/fj-auth-forge.sh
Executable file
53
scripts/fj-auth-forge.sh
Executable file
|
|
@ -0,0 +1,53 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
cd "${root}"
|
||||||
|
|
||||||
|
# Forgejo CLI: `fj`
|
||||||
|
#
|
||||||
|
# Auth token source order:
|
||||||
|
# 1) EVERY_CHANNEL_FORGE_TOKEN / FORGE_TOKEN / CODEBERG_TOKEN env var
|
||||||
|
# 2) `agenix -d secrets/forge-token.age` or `secrets/codeberg-token.age` (optional)
|
||||||
|
# 3) `age -d -i <identity> secrets/forge-token.age` or `secrets/codeberg-token.age` (optional)
|
||||||
|
|
||||||
|
host="${EVERY_CHANNEL_FORGE_HOST:-https://forge.every.channel}"
|
||||||
|
account="${EVERY_CHANNEL_FORGE_ACCOUNT:-every-channel}"
|
||||||
|
token_file_primary="${EVERY_CHANNEL_FORGE_TOKEN_FILE:-secrets/forge-token.age}"
|
||||||
|
token_file_compat="${EVERY_CHANNEL_CODEBERG_TOKEN_FILE:-secrets/codeberg-token.age}"
|
||||||
|
|
||||||
|
rules_file="${EVERY_CHANNEL_AGE_RULES_FILE:-./secrets.nix}"
|
||||||
|
identity_file="${EVERY_CHANNEL_AGE_IDENTITY_FILE:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
||||||
|
|
||||||
|
token="${EVERY_CHANNEL_FORGE_TOKEN:-${FORGE_TOKEN:-${CODEBERG_TOKEN:-}}}"
|
||||||
|
|
||||||
|
load_token_from_file() {
|
||||||
|
local candidate="$1"
|
||||||
|
[[ -f "${candidate}" ]] || return 1
|
||||||
|
if command -v agenix >/dev/null 2>&1; then
|
||||||
|
RULES="${rules_file}" agenix -d "${candidate}" -i "${identity_file}" 2>/dev/null || return 1
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
if command -v age >/dev/null 2>&1; then
|
||||||
|
age -d -i "${identity_file}" "${candidate}" 2>/dev/null || return 1
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
token="$(load_token_from_file "${token_file_primary}" || true)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
token="$(load_token_from_file "${token_file_compat}" || true)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
echo "error: forge token is not set" >&2
|
||||||
|
echo "hint: set EVERY_CHANNEL_FORGE_TOKEN/FORGE_TOKEN or create ${token_file_primary}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Avoid passing the token on the command line (shows up in process listings); use stdin.
|
||||||
|
printf "%s" "${token}" | fj -H "${host}" auth add-key "${account}"
|
||||||
|
echo "fj configured. Try: fj -H ${host} whoami"
|
||||||
145
scripts/fj-enforce-branch-protection.sh
Executable file
145
scripts/fj-enforce-branch-protection.sh
Executable file
|
|
@ -0,0 +1,145 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
cd "${root}"
|
||||||
|
|
||||||
|
host="${EVERY_CHANNEL_FORGE_HOST:-https://forge.every.channel}"
|
||||||
|
repo="${EVERY_CHANNEL_FORGE_REPO:-every-channel/every.channel}"
|
||||||
|
branch="${EVERY_CHANNEL_PROTECTED_BRANCH:-main}"
|
||||||
|
required_checks_csv="${EVERY_CHANNEL_REQUIRED_CHECKS:-ci-gates / checks}"
|
||||||
|
required_approvals="${EVERY_CHANNEL_REQUIRED_APPROVALS:-1}"
|
||||||
|
require_signed_commits_raw="${EVERY_CHANNEL_REQUIRE_SIGNED_COMMITS:-true}"
|
||||||
|
|
||||||
|
rules_file="${EVERY_CHANNEL_AGE_RULES_FILE:-./secrets.nix}"
|
||||||
|
identity_file="${EVERY_CHANNEL_AGE_IDENTITY_FILE:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
||||||
|
token_file_primary="${EVERY_CHANNEL_FORGE_TOKEN_FILE:-secrets/forge-token.age}"
|
||||||
|
token_file_compat="${EVERY_CHANNEL_CODEBERG_TOKEN_FILE:-secrets/codeberg-token.age}"
|
||||||
|
|
||||||
|
token="${EVERY_CHANNEL_FORGE_TOKEN:-${FORGE_TOKEN:-${CODEBERG_TOKEN:-}}}"
|
||||||
|
|
||||||
|
load_token_from_file() {
|
||||||
|
local candidate="$1"
|
||||||
|
[[ -f "${candidate}" && -f "${identity_file}" ]] || return 1
|
||||||
|
if command -v agenix >/dev/null 2>&1; then
|
||||||
|
RULES="${rules_file}" agenix -d "${candidate}" -i "${identity_file}" 2>/dev/null || return 1
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
if command -v age >/dev/null 2>&1; then
|
||||||
|
age -d -i "${identity_file}" "${candidate}" 2>/dev/null || return 1
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
token="$(load_token_from_file "${token_file_primary}" || true)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
token="$(load_token_from_file "${token_file_compat}" || true)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
echo "error: forge token is not set" >&2
|
||||||
|
echo "hint: set EVERY_CHANNEL_FORGE_TOKEN/FORGE_TOKEN or configure ${token_file_primary}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! "${repo}" =~ ^[^/]+/[^/]+$ ]]; then
|
||||||
|
echo "error: EVERY_CHANNEL_FORGE_REPO must be '<owner>/<repo>' (got '${repo}')" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
case "${required_approvals}" in
|
||||||
|
''|*[!0-9]*)
|
||||||
|
echo "error: EVERY_CHANNEL_REQUIRED_APPROVALS must be a non-negative integer" >&2
|
||||||
|
exit 2
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
require_signed_commits_raw_lc="$(printf '%s' "${require_signed_commits_raw}" | tr '[:upper:]' '[:lower:]')"
|
||||||
|
require_signed_commits="false"
|
||||||
|
if [[ "${require_signed_commits_raw_lc}" == "true" || "${require_signed_commits_raw}" == "1" ]]; then
|
||||||
|
require_signed_commits="true"
|
||||||
|
fi
|
||||||
|
|
||||||
|
owner="${repo%%/*}"
|
||||||
|
repo_name="${repo#*/}"
|
||||||
|
api="${host%/}/api/v1/repos/${owner}/${repo_name}/branch_protections"
|
||||||
|
|
||||||
|
contexts_json=""
|
||||||
|
IFS=',' read -r -a contexts <<< "${required_checks_csv}"
|
||||||
|
for ctx in "${contexts[@]}"; do
|
||||||
|
trimmed="$(echo "${ctx}" | sed -E 's/^[[:space:]]+//; s/[[:space:]]+$//')"
|
||||||
|
[[ -n "${trimmed}" ]] || continue
|
||||||
|
escaped="$(printf '%s' "${trimmed}" | sed 's/\\/\\\\/g; s/"/\\"/g')"
|
||||||
|
if [[ -n "${contexts_json}" ]]; then
|
||||||
|
contexts_json+=", "
|
||||||
|
fi
|
||||||
|
contexts_json+="\"${escaped}\""
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ -z "${contexts_json}" ]]; then
|
||||||
|
echo "error: no required status checks specified (EVERY_CHANNEL_REQUIRED_CHECKS)" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
payload="$(cat <<JSON
|
||||||
|
{
|
||||||
|
"rule_name": "${branch}",
|
||||||
|
"enable_push": false,
|
||||||
|
"enable_push_whitelist": false,
|
||||||
|
"enable_merge_whitelist": false,
|
||||||
|
"enable_status_check": true,
|
||||||
|
"status_check_contexts": [${contexts_json}],
|
||||||
|
"required_approvals": ${required_approvals},
|
||||||
|
"require_signed_commits": ${require_signed_commits}
|
||||||
|
}
|
||||||
|
JSON
|
||||||
|
)"
|
||||||
|
|
||||||
|
status="$(curl -sS -o /dev/null -w '%{http_code}' \
|
||||||
|
-H "Authorization: token ${token}" \
|
||||||
|
"${api}/${branch}" || true)"
|
||||||
|
|
||||||
|
if [[ "${status}" == "404" ]]; then
|
||||||
|
curl -fsSL -X POST \
|
||||||
|
-H "Authorization: token ${token}" \
|
||||||
|
-H "content-type: application/json" \
|
||||||
|
"${api}" \
|
||||||
|
-d "${payload}" >/dev/null
|
||||||
|
elif [[ "${status}" == "200" ]]; then
|
||||||
|
curl -fsSL -X PATCH \
|
||||||
|
-H "Authorization: token ${token}" \
|
||||||
|
-H "content-type: application/json" \
|
||||||
|
"${api}/${branch}" \
|
||||||
|
-d "${payload}" >/dev/null
|
||||||
|
else
|
||||||
|
echo "error: unexpected status while reading branch protection: ${status}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
current="$(curl -fsSL \
|
||||||
|
-H "Authorization: token ${token}" \
|
||||||
|
"${api}/${branch}")"
|
||||||
|
|
||||||
|
if ! printf '%s' "${current}" | rg -q '"enable_status_check":\s*true'; then
|
||||||
|
echo "error: branch protection update did not enable status checks" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
if ! printf '%s' "${current}" | rg -q "\"required_approvals\":\\s*${required_approvals}"; then
|
||||||
|
echo "error: branch protection update did not set required approvals to ${required_approvals}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
for ctx in "${contexts[@]}"; do
|
||||||
|
trimmed="$(echo "${ctx}" | sed -E 's/^[[:space:]]+//; s/[[:space:]]+$//')"
|
||||||
|
[[ -n "${trimmed}" ]] || continue
|
||||||
|
if ! printf '%s' "${current}" | rg -F -q "\"${trimmed}\""; then
|
||||||
|
echo "error: required status check context missing after update: ${trimmed}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "ok: enforced branch protection for ${repo}:${branch}"
|
||||||
|
echo "ok: required checks: ${required_checks_csv}"
|
||||||
|
echo "ok: required approvals: ${required_approvals}"
|
||||||
|
|
@ -4,7 +4,7 @@ set -euo pipefail
|
||||||
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
cd "${root}"
|
cd "${root}"
|
||||||
|
|
||||||
host="${EVERY_CHANNEL_FORGE_HOST:-https://codeberg.org}"
|
host="${EVERY_CHANNEL_FORGE_HOST:-https://forge.every.channel}"
|
||||||
repo="${EVERY_CHANNEL_FORGE_REPO:-every-channel/every.channel}"
|
repo="${EVERY_CHANNEL_FORGE_REPO:-every-channel/every.channel}"
|
||||||
secret_name="${EVERY_CHANNEL_FORGE_AGE_SECRET_NAME:-AGE_FORGE_SSH_KEY}"
|
secret_name="${EVERY_CHANNEL_FORGE_AGE_SECRET_NAME:-AGE_FORGE_SSH_KEY}"
|
||||||
key_path="${1:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
key_path="${1:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
||||||
|
|
@ -18,7 +18,7 @@ if ! command -v fj >/dev/null 2>&1; then
|
||||||
exit 2
|
exit 2
|
||||||
fi
|
fi
|
||||||
|
|
||||||
"${root}/scripts/fj-auth-codeberg.sh" >/dev/null
|
"${root}/scripts/fj-auth-forge.sh" >/dev/null
|
||||||
|
|
||||||
key_data="$(base64 < "${key_path}" | tr -d '\n')"
|
key_data="$(base64 < "${key_path}" | tr -d '\n')"
|
||||||
if [[ -z "${key_data}" ]]; then
|
if [[ -z "${key_data}" ]]; then
|
||||||
|
|
|
||||||
82
scripts/forge-set-repo-actions.sh
Executable file
82
scripts/forge-set-repo-actions.sh
Executable file
|
|
@ -0,0 +1,82 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
cd "${root}"
|
||||||
|
|
||||||
|
host="${EVERY_CHANNEL_FORGE_HOST:-https://forge.every.channel}"
|
||||||
|
repo="${EVERY_CHANNEL_FORGE_REPO:-every-channel/every.channel}"
|
||||||
|
enabled_raw="${EVERY_CHANNEL_FORGE_ACTIONS_ENABLED:-false}"
|
||||||
|
|
||||||
|
rules_file="${EVERY_CHANNEL_AGE_RULES_FILE:-./secrets.nix}"
|
||||||
|
identity_file="${EVERY_CHANNEL_AGE_IDENTITY_FILE:-$HOME/.config/every.channel/keys/founder_ed25519}"
|
||||||
|
token_file_primary="${EVERY_CHANNEL_FORGE_TOKEN_FILE:-secrets/forge-token.age}"
|
||||||
|
token_file_compat="${EVERY_CHANNEL_CODEBERG_TOKEN_FILE:-secrets/codeberg-token.age}"
|
||||||
|
|
||||||
|
token="${EVERY_CHANNEL_FORGE_TOKEN:-${FORGE_TOKEN:-${CODEBERG_TOKEN:-}}}"
|
||||||
|
|
||||||
|
load_token_from_file() {
|
||||||
|
local candidate="$1"
|
||||||
|
[[ -f "${candidate}" && -f "${identity_file}" ]] || return 1
|
||||||
|
if command -v agenix >/dev/null 2>&1; then
|
||||||
|
RULES="${rules_file}" agenix -d "${candidate}" -i "${identity_file}" 2>/dev/null || return 1
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
if command -v age >/dev/null 2>&1; then
|
||||||
|
age -d -i "${identity_file}" "${candidate}" 2>/dev/null || return 1
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
token="$(load_token_from_file "${token_file_primary}" || true)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
token="$(load_token_from_file "${token_file_compat}" || true)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -z "${token}" ]]; then
|
||||||
|
echo "error: forge token is not set" >&2
|
||||||
|
echo "hint: set EVERY_CHANNEL_FORGE_TOKEN/FORGE_TOKEN or configure ${token_file_primary}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! "${repo}" =~ ^[^/]+/[^/]+$ ]]; then
|
||||||
|
echo "error: EVERY_CHANNEL_FORGE_REPO must be '<owner>/<repo>' (got '${repo}')" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
enabled_lc="$(printf '%s' "${enabled_raw}" | tr '[:upper:]' '[:lower:]')"
|
||||||
|
enabled="false"
|
||||||
|
if [[ "${enabled_lc}" == "true" || "${enabled_raw}" == "1" ]]; then
|
||||||
|
enabled="true"
|
||||||
|
fi
|
||||||
|
|
||||||
|
owner="${repo%%/*}"
|
||||||
|
repo_name="${repo#*/}"
|
||||||
|
api="${host%/}/api/v1/repos/${owner}/${repo_name}"
|
||||||
|
|
||||||
|
payload="$(cat <<JSON
|
||||||
|
{
|
||||||
|
"has_actions": ${enabled}
|
||||||
|
}
|
||||||
|
JSON
|
||||||
|
)"
|
||||||
|
|
||||||
|
curl -fsSL -X PATCH \
|
||||||
|
-H "Authorization: token ${token}" \
|
||||||
|
-H "content-type: application/json" \
|
||||||
|
"${api}" \
|
||||||
|
-d "${payload}" >/dev/null
|
||||||
|
|
||||||
|
current="$(curl -fsSL \
|
||||||
|
-H "Authorization: token ${token}" \
|
||||||
|
"${api}")"
|
||||||
|
|
||||||
|
if ! printf '%s' "${current}" | rg -q "\"has_actions\":\\s*${enabled}"; then
|
||||||
|
echo "error: repository actions state did not update to ${enabled}" >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "ok: set has_actions=${enabled} for ${repo} on ${host}"
|
||||||
38
scripts/git-configure-hosting.sh
Executable file
38
scripts/git-configure-hosting.sh
Executable file
|
|
@ -0,0 +1,38 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
cd "${root}"
|
||||||
|
|
||||||
|
primary_remote="${EVERY_CHANNEL_PRIMARY_REMOTE:-origin}"
|
||||||
|
primary_url="${EVERY_CHANNEL_PRIMARY_GIT_URL:-git@forge.every.channel:every-channel/every.channel.git}"
|
||||||
|
|
||||||
|
codeberg_remote="${EVERY_CHANNEL_CODEBERG_REMOTE:-mirror-codeberg}"
|
||||||
|
codeberg_url="${EVERY_CHANNEL_CODEBERG_GIT_URL:-git@codeberg.org:every-channel/every.channel.git}"
|
||||||
|
|
||||||
|
github_remote="${EVERY_CHANNEL_GITHUB_REMOTE:-mirror-github}"
|
||||||
|
github_url="${EVERY_CHANNEL_GITHUB_GIT_URL:-git@github.com:every-channel/every.channel.git}"
|
||||||
|
|
||||||
|
legacy_codeberg_remote="${EVERY_CHANNEL_LEGACY_CODEBERG_REMOTE:-codeberg}"
|
||||||
|
|
||||||
|
set_remote_url() {
|
||||||
|
local name="$1"
|
||||||
|
local url="$2"
|
||||||
|
if git remote get-url "${name}" >/dev/null 2>&1; then
|
||||||
|
git remote set-url "${name}" "${url}"
|
||||||
|
else
|
||||||
|
git remote add "${name}" "${url}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# If a legacy `codeberg` remote exists and mirror-codeberg does not, preserve it as a mirror remote.
|
||||||
|
if git remote get-url "${legacy_codeberg_remote}" >/dev/null 2>&1 && ! git remote get-url "${codeberg_remote}" >/dev/null 2>&1; then
|
||||||
|
git remote rename "${legacy_codeberg_remote}" "${codeberg_remote}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
set_remote_url "${primary_remote}" "${primary_url}"
|
||||||
|
set_remote_url "${codeberg_remote}" "${codeberg_url}"
|
||||||
|
set_remote_url "${github_remote}" "${github_url}"
|
||||||
|
|
||||||
|
echo "ok: configured primary + mirror remotes"
|
||||||
|
git remote -v
|
||||||
23
scripts/git-push-mirrors.sh
Executable file
23
scripts/git-push-mirrors.sh
Executable file
|
|
@ -0,0 +1,23 @@
|
||||||
|
#!/usr/bin/env bash
#
# Push the current (or configured) branch — and optionally tags — to every
# configured mirror remote. Remotes that are not set up are skipped with a
# warning rather than failing the whole run.
set -euo pipefail

root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${root}"

branch="${EVERY_CHANNEL_MIRROR_BRANCH:-$(git rev-parse --abbrev-ref HEAD)}"
push_tags="${EVERY_CHANNEL_MIRROR_PUSH_TAGS:-true}"
remotes="${EVERY_CHANNEL_MIRROR_REMOTES:-mirror-codeberg mirror-github}"

# Intentional word-splitting: EVERY_CHANNEL_MIRROR_REMOTES is a
# whitespace-separated list of remote names.
# shellcheck disable=SC2086
for remote in ${remotes}; do
  git remote get-url "${remote}" >/dev/null 2>&1 || {
    echo "warn: remote not configured, skipping: ${remote}" >&2
    continue
  }
  echo "sync: ${remote} (${branch})"
  git push "${remote}" "${branch}:${branch}"
  # Tags are pushed unless explicitly disabled (accepts "true" or "1").
  case "${push_tags}" in
    true | 1) git push "${remote}" --tags ;;
  esac
done

echo "ok: mirror push complete"
||||||
99
scripts/netboot-serve.sh
Executable file
99
scripts/netboot-serve.sh
Executable file
|
|
@ -0,0 +1,99 @@
|
||||||
|
#!/usr/bin/env bash
#
# Serve staged netboot content for the NUC VLAN:
#   - HTTP      (python3 http.server) -> kernel / initrd / netboot.ipxe
#   - TFTP      (dnsmasq)             -> ipxe.efi first-stage loader
#   - ProxyDHCP (dnsmasq)             -> boot options alongside the normal DHCP server
#
# Requires root (binds the TFTP and ProxyDHCP ports) and these env vars:
#   EVERY_CHANNEL_NETBOOT_LISTEN_IP, EVERY_CHANNEL_NETBOOT_INTERFACE,
#   EVERY_CHANNEL_NETBOOT_PROXY_SUBNET (plus optional overrides below).
set -euo pipefail

root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${root}"

netboot_root="${EVERY_CHANNEL_NETBOOT_ROOT:-tmp/netboot}"
http_dir="${netboot_root}/http"
tftp_dir="${netboot_root}/tftp"

listen_ip="${EVERY_CHANNEL_NETBOOT_LISTEN_IP:-}"
interface_name="${EVERY_CHANNEL_NETBOOT_INTERFACE:-}"
proxy_subnet="${EVERY_CHANNEL_NETBOOT_PROXY_SUBNET:-}"
netboot_hostname="${EVERY_CHANNEL_NETBOOT_HOSTNAME:-}"
http_port="${EVERY_CHANNEL_NETBOOT_HTTP_PORT:-8080}"
dnsmasq_port="${EVERY_CHANNEL_NETBOOT_DNS_PORT:-0}" # 0 disables dnsmasq's DNS service

# Fail fast with a clear message when a required tool is absent.
need_cmd() {
  local name="$1"
  if ! command -v "${name}" >/dev/null 2>&1; then
    echo "error: required command not found: ${name}" >&2
    exit 2
  fi
}

need_cmd dnsmasq
need_cmd python3

if [[ "$(id -u)" -ne 0 ]]; then
  echo "error: netboot-serve requires root (TFTP + ProxyDHCP ports)." >&2
  echo "hint: run with sudo and pass env vars, for example:" >&2
  echo " sudo EVERY_CHANNEL_NETBOOT_LISTEN_IP=10.20.30.2 EVERY_CHANNEL_NETBOOT_INTERFACE=eth0 EVERY_CHANNEL_NETBOOT_PROXY_SUBNET=10.20.30.0/24 EVERY_CHANNEL_NETBOOT_HOSTNAME=boot.every.channel ./scripts/netboot-serve.sh" >&2
  exit 2
fi

if [[ -z "${listen_ip}" ]]; then
  echo "error: set EVERY_CHANNEL_NETBOOT_LISTEN_IP (boot server IP on NUC VLAN)" >&2
  exit 2
fi
if [[ -z "${interface_name}" ]]; then
  echo "error: set EVERY_CHANNEL_NETBOOT_INTERFACE (interface on NUC VLAN)" >&2
  exit 2
fi
if [[ -z "${proxy_subnet}" ]]; then
  echo "error: set EVERY_CHANNEL_NETBOOT_PROXY_SUBNET (for example 10.20.30.0/24)" >&2
  exit 2
fi
if [[ -z "${netboot_hostname}" ]]; then
  netboot_hostname="${listen_ip}"
fi

for required in "${http_dir}/kernel" "${http_dir}/initrd" "${http_dir}/netboot.ipxe" "${tftp_dir}/ipxe.efi"; do
  if [[ ! -f "${required}" ]]; then
    echo "error: missing required staged file: ${required}" >&2
    echo "hint: run ./scripts/netboot-stage.sh first" >&2
    exit 2
  fi
done

run_dir="$(mktemp -d)"
# FIX: the HTTP server log previously went to the fixed, predictable path
# /tmp/every-channel-netboot-http.log. This script runs as root and `>`
# follows symlinks, so any local user could pre-create a symlink there and
# make root clobber an arbitrary file. Use an unpredictable mktemp path
# instead (kept after exit so the log stays inspectable).
http_log="$(mktemp /tmp/every-channel-netboot-http.XXXXXX.log)"

cleanup() {
  if [[ -n "${http_pid:-}" ]] && kill -0 "${http_pid}" >/dev/null 2>&1; then
    kill "${http_pid}" >/dev/null 2>&1 || true
    wait "${http_pid}" 2>/dev/null || true
  fi
  rm -rf "${run_dir}"
}
trap cleanup EXIT INT TERM

# ProxyDHCP config: advertise boot options without handing out addresses, so
# the existing Unifi DHCP server keeps doing IP assignment.
cat > "${run_dir}/dnsmasq.conf" <<EOF
port=${dnsmasq_port}
bind-interfaces
interface=${interface_name}
listen-address=${listen_ip}
log-dhcp
enable-tftp
tftp-root=${tftp_dir}
dhcp-range=${proxy_subnet},proxy
dhcp-userclass=set:ipxe,iPXE
dhcp-match=set:efi64,option:client-arch,7
dhcp-match=set:efi64,option:client-arch,9
dhcp-option=66,${netboot_hostname}
dhcp-boot=tag:!ipxe,tag:efi64,ipxe.efi
dhcp-boot=tag:ipxe,tag:efi64,http://${netboot_hostname}:${http_port}/netboot.ipxe
dhcp-boot=tag:!ipxe,ipxe.efi
dhcp-boot=tag:ipxe,http://${netboot_hostname}:${http_port}/netboot.ipxe
EOF

python3 -m http.server "${http_port}" --bind "${listen_ip}" --directory "${http_dir}" >"${http_log}" 2>&1 &
http_pid="$!"

# Robustness: surface an immediate bind failure (port already in use, bad
# listen IP) instead of silently serving TFTP without HTTP.
sleep 1
if ! kill -0 "${http_pid}" >/dev/null 2>&1; then
  echo "error: HTTP server failed to start; see ${http_log}" >&2
  exit 2
fi

echo "ok: HTTP serving ${http_dir} on http://${listen_ip}:${http_port}/ (log: ${http_log})"
echo "ok: advertised netboot host: ${netboot_hostname}"
echo "ok: TFTP serving ${tftp_dir} on ${listen_ip}:69"
echo "ok: ProxyDHCP active for ${proxy_subnet} on interface ${interface_name}"
echo "ok: Use normal Unifi DHCP for IP assignment; do not configure Unifi DHCP bootfile while proxy mode is active."
echo
echo "Press Ctrl+C to stop."
dnsmasq --no-daemon --conf-file="${run_dir}/dnsmasq.conf"
||||||
130
scripts/netboot-stage.sh
Executable file
130
scripts/netboot-stage.sh
Executable file
|
|
@ -0,0 +1,130 @@
|
||||||
|
#!/usr/bin/env bash
#
# Stage netboot content under ${EVERY_CHANNEL_NETBOOT_ROOT}:
#   http/ : kernel, initrd, netboot.ipxe (from a release asset or local tarball)
#   tftp/ : ipxe.efi first-stage loader + iPXE chain scripts
# The staged tree is consumed by ./scripts/netboot-serve.sh.
set -euo pipefail

root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${root}"

forge_host="${EVERY_CHANNEL_FORGE_HOST:-https://forge.every.channel}"
forge_repo="${EVERY_CHANNEL_FORGE_REPO:-every-channel/every.channel}"
release_tag="${EVERY_CHANNEL_NETBOOT_RELEASE_TAG:-}" # empty -> latest release
local_tarball="${EVERY_CHANNEL_NETBOOT_TARBALL:-}"   # set to skip the release download
out_root="${EVERY_CHANNEL_NETBOOT_ROOT:-tmp/netboot}"
ipxe_efi_url="${EVERY_CHANNEL_IPXE_EFI_URL:-https://boot.ipxe.org/snponly.efi}"
netboot_hostname="${EVERY_CHANNEL_NETBOOT_HOSTNAME:-boot.every.channel}"
http_port="${EVERY_CHANNEL_NETBOOT_HTTP_PORT:-8080}"
# Token lookup order: forge-specific, then generic, then legacy Codeberg name.
token="${EVERY_CHANNEL_FORGE_TOKEN:-${FORGE_TOKEN:-${CODEBERG_TOKEN:-}}}"

# Fail fast with a clear message when a required tool is absent.
need_cmd() {
  local name="$1"
  if ! command -v "${name}" >/dev/null 2>&1; then
    echo "error: required command not found: ${name}" >&2
    exit 2
  fi
}

need_cmd curl
need_cmd tar
need_cmd python3

tmp_dir="$(mktemp -d)"
cleanup() {
  rm -rf "${tmp_dir}"
}
trap cleanup EXIT

archive_path="${tmp_dir}/netboot.tar.gz"
release_asset_url=""

if [[ -n "${local_tarball}" ]]; then
  if [[ ! -f "${local_tarball}" ]]; then
    echo "error: netboot tarball not found: ${local_tarball}" >&2
    exit 2
  fi
  cp -f "${local_tarball}" "${archive_path}"
else
  api_base="${forge_host%/}/api/v1/repos/${forge_repo}"
  release_endpoint="${api_base}/releases/latest"
  if [[ -n "${release_tag}" ]]; then
    release_endpoint="${api_base}/releases/tags/${release_tag}"
  fi

  auth_args=()
  if [[ -n "${token}" ]]; then
    auth_args=(-H "Authorization: token ${token}")
  fi

  release_json="${tmp_dir}/release.json"
  # FIX: expanding an *empty* array as "${auth_args[@]}" aborts under `set -u`
  # on bash < 4.4 (including macOS's /bin/bash 3.2, a platform this script
  # already accommodates via `sed -i.bak`). The ${arr[@]+"${arr[@]}"} form
  # expands to nothing when the array is empty and is safe on all versions.
  curl -fsSL ${auth_args[@]+"${auth_args[@]}"} "${release_endpoint}" -o "${release_json}"

  # Pick the x86_64 netboot asset from the release JSON.
  release_asset_url="$(
    python3 - "${release_json}" <<'PY'
import json
import sys

path = sys.argv[1]
with open(path, "r", encoding="utf-8") as f:
    data = json.load(f)

assets = data.get("assets", [])
candidates = []
for asset in assets:
    name = asset.get("name", "")
    if name.startswith("ec-runner-x86_64-netboot-") and name.endswith(".tar.gz"):
        candidates.append(asset)

if not candidates:
    sys.exit(1)

# Pick newest by release ordering if API already sorted; otherwise prefer largest id.
chosen = sorted(candidates, key=lambda x: x.get("id", 0))[-1]
print(chosen.get("browser_download_url", ""))
PY
  )"

  if [[ -z "${release_asset_url}" ]]; then
    echo "error: unable to find x86_64 netboot asset in release" >&2
    exit 2
  fi

  curl -fsSL ${auth_args[@]+"${auth_args[@]}"} -o "${archive_path}" "${release_asset_url}"
fi

http_dir="${out_root}/http"
tftp_dir="${out_root}/tftp"
rm -rf "${http_dir}"
mkdir -p "${http_dir}" "${tftp_dir}"

tar -xzf "${archive_path}" -C "${http_dir}"

for required in kernel initrd netboot.ipxe; do
  if [[ ! -f "${http_dir}/${required}" ]]; then
    echo "error: extracted netboot bundle is missing ${required}" >&2
    exit 2
  fi
done

curl -fsSL -o "${tftp_dir}/ipxe.efi" "${ipxe_efi_url}"
cp -f "${http_dir}/netboot.ipxe" "${tftp_dir}/netboot.ipxe"

# Tiny first-stage script for clients that chain-load via TFTP.
cat > "${tftp_dir}/bootstrap.ipxe" <<'EOF'
#!ipxe
dhcp
chain http://__NETBOOT_HOST__:__HTTP_PORT__/netboot.ipxe
EOF
# -i.bak keeps BSD/macOS sed happy; the backup file is removed right after.
sed -i.bak \
  -e "s#__NETBOOT_HOST__#${netboot_hostname}#g" \
  -e "s#__HTTP_PORT__#${http_port}#g" \
  "${tftp_dir}/bootstrap.ipxe"
rm -f "${tftp_dir}/bootstrap.ipxe.bak"

echo "ok: staged netboot content"
echo "ok: http root: ${http_dir}"
echo "ok: tftp root: ${tftp_dir}"
echo "ok: netboot hostname: ${netboot_hostname}"
echo "ok: netboot http port: ${http_port}"
if [[ -n "${release_asset_url}" ]]; then
  echo "ok: source asset: ${release_asset_url}"
else
  echo "ok: source asset: ${local_tarball}"
fi
echo "hint: run sudo ./scripts/netboot-serve.sh to expose HTTP+TFTP+ProxyDHCP"
||||||
|
|
@ -6,5 +6,6 @@ let
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
"secrets/cloudflare-api-token.age".publicKeys = [ founder forge ];
|
"secrets/cloudflare-api-token.age".publicKeys = [ founder forge ];
|
||||||
|
"secrets/forge-token.age".publicKeys = [ founder forge ];
|
||||||
"secrets/codeberg-token.age".publicKeys = [ founder forge ];
|
"secrets/codeberg-token.age".publicKeys = [ founder forge ];
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -16,7 +16,8 @@ nix develop -c ./scripts/fj-set-age-key-secret.sh ~/.config/every.channel/keys/f
|
||||||
|
|
||||||
- `secrets/secrets.nix`: recipients + secret file mapping
|
- `secrets/secrets.nix`: recipients + secret file mapping
|
||||||
- `secrets/cloudflare-api-token.age`: encrypted Cloudflare API token (used by deploy workflow)
|
- `secrets/cloudflare-api-token.age`: encrypted Cloudflare API token (used by deploy workflow)
|
||||||
- `secrets/codeberg-token.age`: encrypted Codeberg/Forgejo token for `fj` (optional)
|
- `secrets/forge-token.age`: encrypted Forgejo API token for admin scripts (optional, preferred)
|
||||||
|
- `secrets/codeberg-token.age`: encrypted Codeberg token for compatibility/mirror admin scripts (optional)
|
||||||
|
|
||||||
## Create / edit secrets (local)
|
## Create / edit secrets (local)
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue