Wire HDHomeRun observations and recover Forge OP Stack

This commit is contained in:
every.channel 2026-05-03 20:24:04 -07:00
parent 8065860449
commit 0d86104762
No known key found for this signature in database
18 changed files with 1613 additions and 58 deletions

View file

@ -309,6 +309,60 @@ in
description = "Pass `EVERY_CHANNEL_NBC_NO_SANDBOX=1` for Chrome worker sessions.";
};
# Which TV provider the automated NBC login flow selects.
mvpdProvider = lib.mkOption {
  description = "MVPD provider name used when the NBC worker must choose a TV provider.";
  default = "Verizon Fios";
  type = lib.types.str;
};
# Optional credential files for unattended MVPD login; both default to null
# (meaning: no unattended login material is provided).
mvpdUsernameFile = lib.mkOption {
  default = null;
  type = lib.types.nullOr lib.types.path;
  description = "Optional root-managed file containing the MVPD username for unattended NBC login.";
};
mvpdPasswordFile = lib.mkOption {
  default = null;
  type = lib.types.nullOr lib.types.path;
  description = "Optional root-managed file containing the MVPD password for unattended NBC login.";
};
# Opt-in rootless network isolation for the NBC worker's browser process tree.
isolateWithUserNetns = lib.mkOption {
  default = false;
  type = lib.types.bool;
  description = ''
    Launch NBC browser-backed workers inside a rootless user+network namespace backed by
    slirp4netns. This keeps the Chrome / ec-node process tree in its own network context
    while still using the host's active upstream route.
  '';
};
# Gate NBC worker startup on an already-connected host Mullvad tunnel.
requireMullvad = lib.mkOption {
  default = false;
  type = lib.types.bool;
  description = ''
    Refuse to start NBC browser-backed workers until `mullvad status` reports a connected
    tunnel. This assumes the host Mullvad daemon is already logged in and connected.
  '';
};
mullvadWaitSeconds = lib.mkOption {
  description = "Maximum time to wait for Mullvad connectivity before failing an NBC worker start.";
  default = 90;
  # Strictly positive: a zero-length wait would make the gate meaningless.
  type = lib.types.ints.positive;
};
# Optional location pin checked against the tunnel's reported status.
mullvadLocation = lib.mkOption {
  default = null;
  example = "USA";
  type = lib.types.nullOr lib.types.str;
  description = ''
    Optional case-insensitive substring that must appear in `mullvad status` before an NBC
    worker starts. Use this to pin workers to a country or city family without committing the
    operational login material itself.
  '';
};
vnc = {
enable = lib.mkOption {
type = lib.types.bool;
@ -435,6 +489,11 @@ in
pkgs.iproute2
cfg.package
]
++ lib.optionals (isNbc && cfg.nbc.requireMullvad) [ pkgs.mullvad-vpn ]
++ lib.optionals (isNbc && cfg.nbc.isolateWithUserNetns) [
pkgs.slirp4netns
pkgs.util-linux
]
++ lib.optionals cfg.hdhomerun.autoDiscover [ pkgs.jq cfg.discoveryPackage ];
text =
let
@ -452,10 +511,75 @@ in
controlDiscoveryStr = if cfg.control.discovery == null then "" else cfg.control.discovery;
controlIrohSecretStr = if cfg.control.irohSecret == null then "" else cfg.control.irohSecret;
controlGossipPeerLines = lib.concatMapStrings (peer: "cmd+=(--gossip-peer ${lib.escapeShellArg peer})\n") cfg.control.gossipPeers;
nbcMullvadLocationStr = if cfg.nbc.mullvadLocation == null then "" else cfg.nbc.mullvadLocation;
in
''
set -euo pipefail
# Block until the host Mullvad daemon reports a connected tunnel, or give up.
# Polls `mullvad status` once per second for up to mullvadWaitSeconds (the
# limit and expected-location substring are baked in at Nix evaluation time).
# When a location substring is configured, the status output must also contain
# it (case-insensitive, fixed-string match). Returns 0 on success, 1 on
# timeout after logging the final status to stderr.
wait_for_mullvad() {
local wait_seconds status expected
wait_seconds=${toString cfg.nbc.mullvadWaitSeconds}
expected=${lib.escapeShellArg nbcMullvadLocationStr}
for _ in $(seq 1 "$wait_seconds"); do
# Errors from the CLI are swallowed so a briefly unresponsive daemon
# simply retries on the next iteration.
status="$(mullvad status 2>/dev/null || true)"
# Any status beginning with "Connected" is treated as an up tunnel.
if [[ "$status" == Connected* ]]; then
# An empty expected string means no location pinning was requested.
if [[ -z "$expected" ]] || printf '%s\n' "$status" | grep -Fqi -- "$expected"; then
return 0
fi
fi
sleep 1
done
echo "ec-node: Mullvad was not connected${lib.optionalString (cfg.nbc.mullvadLocation != null) " to the expected location"} within ${toString cfg.nbc.mullvadWaitSeconds}s" >&2
mullvad status >&2 || true
return 1
}
# Run the prepared worker command (the surrounding script's cmd array) inside
# a rootless user+network namespace, with slirp4netns providing user-mode
# networking on tap0. A FIFO handshake delays the exec until networking is
# attached: the namespaced child publishes its PID, blocks reading the FIFO,
# and only execs the command after the parent writes the go token.
# Returns the command's exit status; tears down the helper and tmpdir.
run_in_user_netns() {
local tmpdir pid_file ready_fifo ns_pid slirp_pid status
tmpdir="$(mktemp -d /tmp/${unit}.usernet.XXXXXX)"
pid_file="$tmpdir/pid"
ready_fifo="$tmpdir/ready"
mkfifo "$ready_fifo"
# Inner script runs as mapped root in the fresh namespaces: bring up
# loopback, write our PID for slirp4netns, then wait for the go token.
# shellcheck disable=SC2016
unshare --user --map-root-user --net ${pkgs.bash}/bin/bash -lc '
set -euo pipefail
ip link set lo up
echo $$ > "$1"
read -r _ < "$2"
shift 2
exec "$@"
' bash "$pid_file" "$ready_fifo" "''${cmd[@]}" &
ns_pid=$!
# Wait up to ~5 seconds (50 x 0.1s) for the child to publish its PID.
for _ in $(seq 1 50); do
[[ -s "$pid_file" ]] && break
sleep 0.1
done
if [[ ! -s "$pid_file" ]]; then
echo "ec-node: timed out waiting for NBC user-netns PID" >&2
kill "$ns_pid" 2>/dev/null || true
rm -rf "$tmpdir"
return 1
fi
# Attach user-mode networking (tap0) to the child's network namespace.
slirp4netns --configure --mtu=1500 "$(cat "$pid_file")" tap0 >"$tmpdir/slirp.log" 2>&1 &
slirp_pid=$!
# NOTE(review): fixed 1s delay assumes slirp4netns has configured tap0 by
# then; polling the interface or slirp.log would be more robust.
sleep 1
# Unblock the child's read so it execs the worker command.
# NOTE(review): if the child exited before reaching its read, this write
# would block; the PID-file wait above makes that unlikely, not impossible.
printf 'go\n' > "$ready_fifo"
# Preserve the worker's exit status despite set -e.
set +e
wait "$ns_pid"
status=$?
set -e
# Best-effort teardown of the networking helper and scratch directory.
kill "$slirp_pid" 2>/dev/null || true
wait "$slirp_pid" 2>/dev/null || true
rm -rf "$tmpdir"
return "$status"
}
nbc_url=${lib.escapeShellArg nbcUrlStr}
input=""
if [[ -z "$nbc_url" ]]; then
@ -596,7 +720,16 @@ in
# quickly during activation.
trap 'exit 0' INT TERM
while true; do
"''${cmd[@]}" || true
${lib.optionalString (isNbc && cfg.nbc.requireMullvad) ''
if ! wait_for_mullvad; then
sleep 2
continue
fi
''}
${lib.optionalString (isNbc && cfg.nbc.isolateWithUserNetns) "run_in_user_netns || true"}
${lib.optionalString (!isNbc || !cfg.nbc.isolateWithUserNetns) ''
"''${cmd[@]}" || true
''}
sleep 2
done
'';
@ -609,10 +742,12 @@ in
wantedBy = [ "multi-user.target" ];
after =
[ "network-online.target" ]
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ];
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ]
++ lib.optionals (isNbc && cfg.nbc.requireMullvad) [ "mullvad-daemon.service" ];
wants =
[ "network-online.target" ]
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ];
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ]
++ lib.optionals (isNbc && cfg.nbc.requireMullvad) [ "mullvad-daemon.service" ];
# Keep the unit from entering "failed" due to rapid restarts (deploy-flake treats
# failed units during `switch-to-configuration test` as a deployment failure).
@ -652,13 +787,22 @@ in
environment =
cfg.environment
// lib.optionalAttrs isNbc {
DISPLAY = cfg.nbc.display;
EVERY_CHANNEL_NBC_CHROME_PATH = cfg.nbc.chromeBinary;
EVERY_CHANNEL_NBC_PROFILE_DIR = cfg.nbc.profileDir;
EVERY_CHANNEL_NBC_NO_SANDBOX = if cfg.nbc.noSandbox then "1" else "0";
HOME = "/var/lib/every-channel";
};
// lib.optionalAttrs isNbc (
{
DISPLAY = cfg.nbc.display;
EVERY_CHANNEL_NBC_CHROME_PATH = cfg.nbc.chromeBinary;
EVERY_CHANNEL_NBC_MVPD_PROVIDER = cfg.nbc.mvpdProvider;
EVERY_CHANNEL_NBC_PROFILE_DIR = cfg.nbc.profileDir;
EVERY_CHANNEL_NBC_NO_SANDBOX = if cfg.nbc.noSandbox then "1" else "0";
HOME = "/var/lib/every-channel";
}
// lib.optionalAttrs (cfg.nbc.mvpdUsernameFile != null) {
EVERY_CHANNEL_NBC_MVPD_USERNAME_FILE = toString cfg.nbc.mvpdUsernameFile;
}
// lib.optionalAttrs (cfg.nbc.mvpdPasswordFile != null) {
EVERY_CHANNEL_NBC_MVPD_PASSWORD_FILE = toString cfg.nbc.mvpdPasswordFile;
}
);
};
})
cfg.broadcasts)

View file

@ -54,6 +54,50 @@ in
description = "P2P listen port for op-node.";
};
ports =
  let
    # All Forge OP Stack services bind loopback-only ports in the 285xx
    # range (plus 28560) to stay clear of the host Ethereum node's defaults.
    localPort = default: description:
      lib.mkOption {
        inherit default description;
        type = lib.types.port;
      };
  in
  {
    # op-geth (L2 execution client) endpoints.
    l2Http = localPort 28545 "Local op-geth HTTP JSON-RPC port.";
    l2Ws = localPort 28546 "Local op-geth WebSocket JSON-RPC port.";
    l2Auth = localPort 28551 "Local op-geth Engine API port.";
    l2P2p = localPort 28549 "Local op-geth P2P port, kept away from the host Ethereum node's 30303.";
    # Rollup-service RPC endpoints.
    rollupRpc = localPort 28547 "Local op-node rollup RPC port.";
    batcherRpc = localPort 28548 "Local op-batcher admin RPC port.";
    proposerRpc = localPort 28560 "Local op-proposer admin RPC port.";
  };
openFirewall = lib.mkOption {
type = lib.types.bool;
default = true;
@ -87,12 +131,12 @@ in
images = {
opNode = lib.mkOption {
type = lib.types.str;
default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.13.5";
default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.16.6";
description = "Container image for op-node.";
};
opGeth = lib.mkOption {
type = lib.types.str;
default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101511.1";
default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101702.0-rc.1";
description = "Container image for op-geth.";
};
batcher = lib.mkOption {
@ -177,7 +221,7 @@ in
if [ ! -d /workspace/op-geth-data/geth/chaindata ]; then
geth init --datadir=/workspace/op-geth-data --state.scheme=hash /workspace/genesis.json
fi
exec geth --datadir=/workspace/op-geth-data --http --http.addr=127.0.0.1 --http.port=8545 --ws --ws.addr=127.0.0.1 --ws.port=8546 --authrpc.addr=127.0.0.1 --authrpc.port=8551 --authrpc.jwtsecret=/workspace/jwt.txt --syncmode=full --gcmode=archive --rollup.disabletxpoolgossip=true --http.vhosts=* --http.corsdomain=* --http.api=eth,net,web3,debug,txpool,admin --ws.origins=* --ws.api=eth,net,web3,debug,txpool,admin --authrpc.vhosts=*
exec geth --datadir=/workspace/op-geth-data --http --http.addr=127.0.0.1 --http.port=${toString cfg.ports.l2Http} --ws --ws.addr=127.0.0.1 --ws.port=${toString cfg.ports.l2Ws} --authrpc.addr=127.0.0.1 --authrpc.port=${toString cfg.ports.l2Auth} --authrpc.jwtsecret=/workspace/jwt.txt --port=${toString cfg.ports.l2P2p} --syncmode=full --gcmode=archive --rollup.disabletxpoolgossip=true --http.vhosts=* --http.corsdomain=* --http.api=eth,net,web3,debug,txpool,admin --ws.origins=* --ws.api=eth,net,web3,debug,txpool,admin --authrpc.vhosts=*
''
];
};
@ -198,7 +242,7 @@ in
exec op-node \
--l1="$L1_RPC_URL" \
--l1.beacon="$L1_BEACON_URL" \
--l2=http://127.0.0.1:8551 \
--l2=http://127.0.0.1:${toString cfg.ports.l2Auth} \
--l2.jwt-secret=/workspace/jwt.txt \
--rollup.config=/workspace/rollup.json \
--sequencer.enabled=true \
@ -213,7 +257,7 @@ in
--p2p.advertise.udp=${toString cfg.p2pListenPort} \
--p2p.sequencer.key="$PRIVATE_KEY" \
--rpc.addr=127.0.0.1 \
--rpc.port=8547 \
--rpc.port=${toString cfg.ports.rollupRpc} \
--rpc.enable-admin \
--log.level=info \
--log.format=json
@ -236,9 +280,8 @@ in
--l2-eth-rpc="$L2_RPC_URL" \
--rollup-rpc="$ROLLUP_RPC_URL" \
--private-key="$PRIVATE_KEY" \
--batch-inbox-address="$BATCH_INBOX_ADDRESS" \
--rpc.addr=127.0.0.1 \
--rpc.port=8548 \
--rpc.port=${toString cfg.ports.batcherRpc} \
--rpc.enable-admin \
--max-channel-duration=1 \
--data-availability-type=calldata \
@ -260,7 +303,7 @@ in
"-lc"
''
exec op-proposer \
--rpc.port=8560 \
--rpc.port=${toString cfg.ports.proposerRpc} \
--rollup-rpc="$ROLLUP_RPC_URL" \
--l1-eth-rpc="$L1_RPC_URL" \
--private-key="$PRIVATE_KEY" \
@ -347,6 +390,8 @@ in
export EVERY_CHANNEL_OP_STACK_L1_BEACON_URL=${lib.escapeShellArg cfg.l1BeaconUrl}
export EVERY_CHANNEL_OP_STACK_CHAIN_ID=${toString cfg.chainId}
export EVERY_CHANNEL_OP_STACK_P2P_ADVERTISE_IP=${lib.escapeShellArg cfg.p2pAdvertiseIp}
export EVERY_CHANNEL_OP_STACK_L2_RPC_URL=http://127.0.0.1:${toString cfg.ports.l2Http}
export EVERY_CHANNEL_OP_STACK_ROLLUP_RPC_URL=http://127.0.0.1:${toString cfg.ports.rollupRpc}
export EVERY_CHANNEL_OP_DEPLOYER_BIN=${lib.escapeShellArg "${cfg.rootDir}/bin/op-deployer"}
export EVERY_CHANNEL_OP_DEPLOYER_TAG=${lib.escapeShellArg cfg.opDeployerTag}
export EVERY_CHANNEL_OP_DEPLOYER_DOWNLOAD_SCRIPT=${lib.escapeShellArg downloadScript}