Advance forge NBC worker and Ethereum full nodes

This commit is contained in:
every.channel 2026-04-03 02:01:34 -07:00
parent 7d84510eac
commit 3402f7dab2
No known key found for this signature in database
17 changed files with 3066 additions and 414 deletions

485
nix/modules/ec-ethereum.nix Normal file
View file

@ -0,0 +1,485 @@
{ lib, config, pkgs, ... }:
# every.channel Ethereum full nodes: one Reth (execution) + Lighthouse
# (consensus) pair per enabled network, run as host-network OCI containers
# with all state on a dedicated ZFS pool.
let
  cfg = config.services.every-channel.ethereum;

  # Option submodule shared by mainnet/sepolia. `defaults` carries the
  # network-specific default port numbers so both networks can run on the
  # same host without colliding.
  mkNetworkSubmodule =
    name: defaults:
    { ... }:
    {
      options = {
        enable = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = "Whether to run the ${name} Ethereum execution and consensus pair.";
        };
        rootDir = lib.mkOption {
          type = lib.types.str;
          default = "${cfg.rootDir}/${name}";
          description = "Persistent root directory for the ${name} node state.";
        };
        # Reth (execution client) ports. The container definitions below bind
        # HTTP/WS/auth/metrics to loopback; only p2p/discovery are opened in
        # the firewall.
        reth = {
          httpPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.rethHttpPort;
            description = "Local HTTP JSON-RPC port for the ${name} Reth node.";
          };
          wsPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.rethWsPort;
            description = "Local WebSocket JSON-RPC port for the ${name} Reth node.";
          };
          authPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.rethAuthPort;
            description = "Local Engine API port for the ${name} Reth node.";
          };
          p2pPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.rethP2pPort;
            description = "RLPx/P2P TCP port for the ${name} Reth node.";
          };
          discoveryPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.rethDiscoveryPort;
            description = "Discovery UDP port for the ${name} Reth node.";
          };
          metricsPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.rethMetricsPort;
            description = "Prometheus port for the ${name} Reth node.";
          };
        };
        # Lighthouse (consensus client) ports, analogous to the Reth set.
        lighthouse = {
          httpPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.lighthouseHttpPort;
            description = "Local Beacon API port for the ${name} Lighthouse node.";
          };
          p2pPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.lighthouseP2pPort;
            description = "TCP libp2p port for the ${name} Lighthouse node.";
          };
          discoveryPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.lighthouseDiscoveryPort;
            description = "UDP discovery port for the ${name} Lighthouse node.";
          };
          quicPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.lighthouseQuicPort;
            description = "UDP QUIC port for the ${name} Lighthouse node.";
          };
          metricsPort = lib.mkOption {
            type = lib.types.port;
            default = defaults.lighthouseMetricsPort;
            description = "Prometheus port for the ${name} Lighthouse node.";
          };
        };
      };
    };
  # The fixed set of supported networks, filtered below by their `enable`
  # flags; everything else in this module iterates over `enabledNetworks`.
  networks = {
    mainnet = cfg.mainnet;
    sepolia = cfg.sepolia;
  };
  enabledNetworks = lib.filterAttrs (_: networkCfg: networkCfg.enable) networks;

  # Container (and thus `podman-*.service` unit) naming helpers, reused by
  # the systemd unit overrides in `config` below.
  rethContainerName = network: "every-channel-ethereum-${network}-reth";
  lighthouseContainerName = network: "every-channel-ethereum-${network}-lighthouse";

  # Shell fragment spliced into the storage bootstrap script: per-network ZFS
  # datasets plus the Engine API JWT secret shared by both clients.
  networkDatasetLines = lib.concatStringsSep "\n" (
    lib.mapAttrsToList
      (network: networkCfg: ''
        ensure_dataset ${lib.escapeShellArg "${cfg.poolName}/${network}"}
        ensure_dataset ${lib.escapeShellArg "${cfg.poolName}/${network}/reth"}
        ensure_dataset ${lib.escapeShellArg "${cfg.poolName}/${network}/lighthouse"}
        ensure_jwt ${lib.escapeShellArg "${networkCfg.rootDir}/jwt.hex"}
      '')
      enabledNetworks
  );

  # Advertise the configured public IP in P2P metadata when set: Reth takes
  # `--nat extip:<ip>`, Lighthouse takes `--enr-address <ip>`. Both lists are
  # empty when publicIp is null.
  mkNatArgs = lib.optionals (cfg.publicIp != null) [ "--nat" "extip:${cfg.publicIp}" ];
  mkEnrArgs = lib.optionals (cfg.publicIp != null) [ "--enr-address" cfg.publicIp ];
  # Host-network Reth container for one network. All RPC surfaces bind to
  # 127.0.0.1; only the p2p/discovery ports are reachable externally (opened
  # by the firewall rules in `config`).
  mkRethContainer =
    network: networkCfg: {
      image = cfg.images.reth;
      autoStart = true;
      # Host networking keeps loopback-bound RPC local to the machine and
      # avoids per-port mappings for the p2p listeners.
      extraOptions = [ "--network=host" ];
      volumes = [ "${networkCfg.rootDir}:/state" ];
      cmd =
        [
          "node"
          "--chain"
          network
          "--datadir"
          "/state/reth"
          "--full"
          "--http"
          "--http.addr"
          "127.0.0.1"
          "--http.port"
          (toString networkCfg.reth.httpPort)
          "--http.api"
          "eth,net,web3,rpc"
          "--ws"
          "--ws.addr"
          "127.0.0.1"
          "--ws.port"
          (toString networkCfg.reth.wsPort)
          "--ws.api"
          "eth,net,web3,rpc"
          "--authrpc.addr"
          "127.0.0.1"
          "--authrpc.port"
          (toString networkCfg.reth.authPort)
          # /state/jwt.hex is created by the storage bootstrap unit
          # (ensure_jwt); Lighthouse reads the same file as --execution-jwt.
          "--authrpc.jwtsecret"
          "/state/jwt.hex"
          "--port"
          (toString networkCfg.reth.p2pPort)
          "--discovery.port"
          (toString networkCfg.reth.discoveryPort)
          "--metrics"
          "127.0.0.1:${toString networkCfg.reth.metricsPort}"
          "--log.stdout.format"
          "json"
        ]
        ++ mkNatArgs;
    };
  # Host-network Lighthouse beacon-node container, paired with the Reth
  # container above through the loopback Engine API endpoint + shared JWT.
  mkLighthouseContainer =
    network: networkCfg: {
      image = cfg.images.lighthouse;
      autoStart = true;
      extraOptions = [ "--network=host" ];
      volumes = [ "${networkCfg.rootDir}:/state" ];
      entrypoint = "/usr/local/bin/lighthouse";
      cmd =
        [
          "beacon_node"
          "--network"
          network
          "--datadir"
          "/state/lighthouse"
          "--http"
          "--http-address"
          "127.0.0.1"
          "--http-port"
          (toString networkCfg.lighthouse.httpPort)
          # Talks to the co-located Reth Engine API over loopback.
          "--execution-endpoint"
          "http://127.0.0.1:${toString networkCfg.reth.authPort}"
          "--execution-jwt"
          "/state/jwt.hex"
          # NOTE(review): this syncs from genesis rather than from a
          # checkpoint URL — confirm that is intended (genesis sync is slow
          # and discouraged on mainnet).
          "--allow-insecure-genesis-sync"
          "--port"
          (toString networkCfg.lighthouse.p2pPort)
          "--discovery-port"
          (toString networkCfg.lighthouse.discoveryPort)
          "--quic-port"
          (toString networkCfg.lighthouse.quicPort)
          "--metrics"
          "--metrics-address"
          "127.0.0.1"
          "--metrics-port"
          (toString networkCfg.lighthouse.metricsPort)
        ]
        ++ mkEnrArgs;
    };
  # Plain-text landing page served at the public host's root path (see the
  # Caddy vhost in `config`). Emitted verbatim to clients.
  caddyRootBody = ''
    every.channel ethereum nodes
    mainnet sync: /mainnet/sync
    mainnet finality: /mainnet/finality
    sepolia sync: /sepolia/sync
    sepolia finality: /sepolia/finality
    raw execution and beacon RPC remain local-only on ecp-forge for now.
  '';
in
{
  options.services.every-channel.ethereum = {
    enable = lib.mkEnableOption "every.channel dual-network Ethereum full nodes";
    poolName = lib.mkOption {
      type = lib.types.str;
      default = "eth";
      description = "Dedicated ZFS pool name used for Ethereum node state.";
    };
    poolDevice = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      default = null;
      description = "Block device used to create the dedicated Ethereum ZFS pool if it does not already exist.";
    };
    rootDir = lib.mkOption {
      type = lib.types.str;
      default = "/eth";
      description = "Mountpoint for the dedicated Ethereum ZFS pool.";
    };
    publicIp = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      default = null;
      description = "Public IP to advertise in Ethereum P2P metadata.";
    };
    publicHost = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      default = null;
      description = "Optional HTTPS host that publishes node sync and finality surfaces.";
    };
    # Pinned OCI images; bump client versions here.
    images = {
      reth = lib.mkOption {
        type = lib.types.str;
        default = "ghcr.io/paradigmxyz/reth:v1.9.3";
        description = "Pinned Reth OCI image.";
      };
      lighthouse = lib.mkOption {
        type = lib.types.str;
        default = "docker.io/sigp/lighthouse:v8.1.1";
        description = "Pinned Lighthouse OCI image.";
      };
    };
    # Per-network configuration; sepolia's default ports are offset from
    # mainnet's so both networks can run side by side on one host.
    mainnet = lib.mkOption {
      type = lib.types.submodule (mkNetworkSubmodule "mainnet" {
        rethHttpPort = 8545;
        rethWsPort = 8546;
        rethAuthPort = 8551;
        rethP2pPort = 30303;
        rethDiscoveryPort = 30303;
        rethMetricsPort = 19001;
        lighthouseHttpPort = 5052;
        lighthouseP2pPort = 9000;
        lighthouseDiscoveryPort = 9000;
        lighthouseQuicPort = 9001;
        lighthouseMetricsPort = 5054;
      });
      default = { };
      description = "Mainnet Ethereum node configuration.";
    };
    sepolia = lib.mkOption {
      type = lib.types.submodule (mkNetworkSubmodule "sepolia" {
        rethHttpPort = 18545;
        rethWsPort = 18546;
        rethAuthPort = 18551;
        rethP2pPort = 31303;
        rethDiscoveryPort = 31303;
        rethMetricsPort = 29001;
        lighthouseHttpPort = 15052;
        lighthouseP2pPort = 19000;
        lighthouseDiscoveryPort = 19000;
        lighthouseQuicPort = 19001;
        lighthouseMetricsPort = 15054;
      });
      default = { };
      description = "Sepolia Ethereum node configuration.";
    };
  };
  config = lib.mkIf cfg.enable {
    assertions = [
      {
        # The bootstrap unit needs a device to create the pool on first run.
        assertion = cfg.poolDevice != null;
        message = "services.every-channel.ethereum.poolDevice must be set when the Ethereum node is enabled";
      }
      {
        assertion = enabledNetworks != { };
        message = "At least one Ethereum network must be enabled";
      }
    ];
    # Import the dedicated pool at boot. NOTE(review): on the very first boot
    # the pool does not exist yet (it is created by the bootstrap unit below)
    # — confirm the ZFS import service tolerates an absent pool until then.
    boot.zfs.extraPools = [ cfg.poolName ];
networking.firewall = {
allowedTCPPorts =
lib.flatten (
lib.mapAttrsToList
(_: networkCfg: [
networkCfg.reth.p2pPort
networkCfg.lighthouse.p2pPort
])
enabledNetworks
);
allowedUDPPorts =
lib.flatten (
lib.mapAttrsToList
(_: networkCfg: [
networkCfg.reth.discoveryPort
networkCfg.lighthouse.discoveryPort
networkCfg.lighthouse.quicPort
])
enabledNetworks
);
};
virtualisation.oci-containers.containers =
(lib.mapAttrs'
(network: networkCfg:
lib.nameValuePair (rethContainerName network) (mkRethContainer network networkCfg))
enabledNetworks)
// (lib.mapAttrs'
(network: networkCfg:
lib.nameValuePair (lighthouseContainerName network) (mkLighthouseContainer network networkCfg))
enabledNetworks);
    systemd.services =
      {
        # Oneshot bootstrap: creates or re-points the dedicated ZFS pool,
        # creates per-network datasets, and generates the Engine API JWT
        # secrets. Ordered before every node container via `before`.
        every-channel-ethereum-storage = {
          description = "every.channel Ethereum NVMe ZFS pool and dataset bootstrap";
          wantedBy = [ "multi-user.target" ];
          after = [ "local-fs.target" "zfs.target" ];
          wants = [ "zfs.target" ];
          before =
            lib.flatten (
              lib.mapAttrsToList
                (network: _: [
                  "podman-${rethContainerName network}.service"
                  "podman-${lighthouseContainerName network}.service"
                ])
                enabledNetworks
            );
          path = with pkgs; [
            coreutils
            openssl
            util-linux
            zfs
          ];
          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };
          # Idempotent script: safe to re-run on every activation. It refuses
          # to reformat a device that already carries filesystem signatures.
          # ${networkDatasetLines} at the bottom expands to the per-network
          # ensure_dataset/ensure_jwt calls built in the `let` block above.
          script = ''
            set -euo pipefail
            pool=${lib.escapeShellArg cfg.poolName}
            root_dir=${lib.escapeShellArg cfg.rootDir}
            device=${lib.escapeShellArg cfg.poolDevice}
            ensure_dataset() {
              local dataset="$1"
              if ! zfs list -H "$dataset" >/dev/null 2>&1; then
                zfs create -p "$dataset"
              fi
              zfs set atime=off compression=lz4 xattr=sa "$dataset" >/dev/null
            }
            ensure_jwt() {
              local path="$1"
              if [[ ! -s "$path" ]]; then
                umask 077
                openssl rand -hex 32 | tr -d '\n' > "$path"
                printf '\n' >> "$path"
              fi
              chmod 0400 "$path"
            }
            if ! zpool list -H "$pool" >/dev/null 2>&1; then
              if [[ -z "$device" ]]; then
                echo "every-channel-ethereum-storage: missing poolDevice for pool $pool" >&2
                exit 1
              fi
              if [[ ! -b "$device" ]]; then
                echo "every-channel-ethereum-storage: device $device not present" >&2
                exit 1
              fi
              if blkid "$device" >/dev/null 2>&1; then
                echo "every-channel-ethereum-storage: device $device already has signatures; refusing to overwrite automatically" >&2
                exit 1
              fi
              zpool create -f \
                -o ashift=12 \
                -O mountpoint="$root_dir" \
                -O atime=off \
                -O compression=lz4 \
                -O xattr=sa \
                "$pool" "$device"
            else
              zfs set mountpoint="$root_dir" "$pool" >/dev/null
            fi
            ${networkDatasetLines}
          '';
        };
      }
      # Make each generated Reth unit wait for storage and require it, and
      # tie it to the state mountpoint.
      // (lib.mapAttrs'
        (network: networkCfg:
          lib.nameValuePair "podman-${rethContainerName network}" {
            after = [ "network-online.target" "every-channel-ethereum-storage.service" ];
            wants = [ "network-online.target" "every-channel-ethereum-storage.service" ];
            requires = [ "every-channel-ethereum-storage.service" ];
            unitConfig.RequiresMountsFor = [ networkCfg.rootDir ];
          })
        enabledNetworks)
      # Lighthouse additionally orders after (and requires) its paired Reth
      # container so the Engine API endpoint exists before the beacon node
      # starts.
      // (lib.mapAttrs'
        (network: networkCfg:
          lib.nameValuePair "podman-${lighthouseContainerName network}" {
            after = [
              "network-online.target"
              "every-channel-ethereum-storage.service"
              "podman-${rethContainerName network}.service"
            ];
            wants = [
              "network-online.target"
              "every-channel-ethereum-storage.service"
              "podman-${rethContainerName network}.service"
            ];
            requires = [
              "every-channel-ethereum-storage.service"
              "podman-${rethContainerName network}.service"
            ];
            unitConfig.RequiresMountsFor = [ networkCfg.rootDir ];
          })
        enabledNetworks);
    # Public, read-only status surface (only when publicHost is set): the
    # beacon sync/finality endpoints are proxied under friendly paths, and
    # everything else gets the plain-text landing page. Raw execution and
    # beacon RPC stay loopback-only.
    # NOTE(review): the ports proxied here are cfg.<network>.lighthouse.httpPort
    # regardless of whether that network is enabled — a disabled network's
    # paths will 502. Confirm that is acceptable.
    services.caddy.virtualHosts = lib.mkIf (cfg.publicHost != null) {
      "${cfg.publicHost}".extraConfig = ''
        encode zstd gzip
        handle /mainnet/sync {
          uri replace /mainnet/sync /eth/v1/node/syncing
          reverse_proxy http://127.0.0.1:${toString cfg.mainnet.lighthouse.httpPort}
        }
        handle /mainnet/finality {
          uri replace /mainnet/finality /eth/v1/beacon/states/head/finality_checkpoints
          reverse_proxy http://127.0.0.1:${toString cfg.mainnet.lighthouse.httpPort}
        }
        handle /sepolia/sync {
          uri replace /sepolia/sync /eth/v1/node/syncing
          reverse_proxy http://127.0.0.1:${toString cfg.sepolia.lighthouse.httpPort}
        }
        handle /sepolia/finality {
          uri replace /sepolia/finality /eth/v1/beacon/states/head/finality_checkpoints
          reverse_proxy http://127.0.0.1:${toString cfg.sepolia.lighthouse.httpPort}
        }
        handle {
          header Content-Type text/plain
          respond "${caddyRootBody}" 200
        }
      '';
    };
  };
}

View file

@ -266,6 +266,70 @@ in
};
};
nbc = {
enable = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable Linux Chrome + virtual-display support for NBC browser-backed broadcasts.";
};
chromeBinary = lib.mkOption {
type = lib.types.str;
default = "/run/current-system/sw/bin/google-chrome-stable";
description = "Chrome binary used by `ec-node nbc-bootstrap` and `ec-node nbc-wt-publish`.";
};
profileDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/every-channel/nbc-profile";
description = "Persistent Chrome profile directory used for NBC / Adobe auth sessions.";
};
authScreenshotDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/every-channel/nbc-auth";
description = "Directory for operator-facing screenshots when bootstrap hits an interactive auth page.";
};
display = lib.mkOption {
type = lib.types.str;
default = ":99";
description = "DISPLAY used by the NBC virtual display session.";
};
screen = lib.mkOption {
type = lib.types.str;
default = "1920x1080x24";
description = "Xvfb screen geometry for the NBC virtual display.";
};
noSandbox = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Pass `EVERY_CHANNEL_NBC_NO_SANDBOX=1` for Chrome worker sessions.";
};
vnc = {
enable = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Expose the NBC virtual display over VNC so auth can be completed remotely when needed.";
};
listen = lib.mkOption {
type = lib.types.str;
default = "127.0.0.1";
description = "x11vnc listen address for the NBC virtual display.";
};
port = lib.mkOption {
type = lib.types.port;
default = 5900;
description = "x11vnc TCP port for the NBC virtual display.";
};
};
};
broadcasts = lib.mkOption {
type = lib.types.listOf (lib.types.submodule {
options = {
@ -283,10 +347,15 @@ in
default = null;
description = "Optional explicit ffmpeg input URL/file. When set, HDHomeRun settings are ignored for this broadcast.";
};
nbcUrl = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Optional NBC watch/live URL for a browser-backed relay publish worker.";
};
};
});
default = [ ];
description = "List of broadcasts (name + channel, or explicit input) to publish.";
description = "List of broadcasts (HDHomeRun, explicit input, or NBC browser-backed URL) to publish.";
};
};
@ -299,7 +368,7 @@ in
{
assertion =
let
needsHdhr = builtins.any (b: b.input == null) cfg.broadcasts;
needsHdhr = builtins.any (b: b.input == null && b.nbcUrl == null) cfg.broadcasts;
in
(!needsHdhr) || (cfg.hdhomerun.host != null) || (cfg.hdhomerun.deviceId != null);
message = "Set services.every-channel.ec-node.hdhomerun.host or .deviceId (required when any broadcast omits `input`)";
@ -309,8 +378,20 @@ in
message = "hdhomerun.autoDiscover only applies when hdhomerun.host is unset";
}
{
assertion = builtins.all (b: (b.input != null) || (b.channel != null)) cfg.broadcasts;
message = "Each broadcast must set either `input` or `channel`";
assertion =
builtins.all
(b:
(lib.length (lib.filter (value: value != null) [ b.input b.channel b.nbcUrl ])) == 1)
cfg.broadcasts;
message = "Each broadcast must set exactly one of `input`, `channel`, or `nbcUrl`";
}
{
assertion =
let
hasNbcBroadcast = builtins.any (b: b.nbcUrl != null) cfg.broadcasts;
in
(!hasNbcBroadcast) || cfg.nbc.enable;
message = "Set services.every-channel.ec-node.nbc.enable = true before configuring `broadcasts.*.nbcUrl`";
}
];
@ -318,16 +399,30 @@ in
[
"d /run/every-channel 1777 root root - -"
]
++ lib.optionals cfg.nbc.enable [
"d /var/lib/every-channel 0750 every-channel every-channel - -"
"d ${cfg.nbc.profileDir} 0750 every-channel every-channel - -"
"d ${cfg.nbc.authScreenshotDir} 0750 every-channel every-channel - -"
]
++ lib.optionals cfg.archive.enable [
"d ${cfg.archive.outputDir} 0750 root root - -"
"d ${cfg.archive.manifestDir} 0750 root root - -"
];
users.groups.every-channel = lib.mkIf cfg.nbc.enable { };
users.users.every-channel = lib.mkIf cfg.nbc.enable {
isSystemUser = true;
group = "every-channel";
home = "/var/lib/every-channel";
createHome = true;
};
systemd.services =
lib.listToAttrs (map
(b:
let
unit = "every-channel-wt-publish-${sanitizeUnitName b.name}";
isNbc = b.nbcUrl != null;
runner = pkgs.writeShellApplication {
name = unit;
runtimeInputs =
@ -352,6 +447,7 @@ in
"cmd+=(${lib.concatStringsSep " " (map lib.escapeShellArg cfg.extraArgs)})";
explicitInputStr = if b.input == null then "" else b.input;
channelStr = if b.channel == null then "" else b.channel;
nbcUrlStr = if b.nbcUrl == null then "" else b.nbcUrl;
controlEndpointOutPath = "/run/every-channel/control-peer-${sanitizeUnitName b.name}.json";
controlDiscoveryStr = if cfg.control.discovery == null then "" else cfg.control.discovery;
controlIrohSecretStr = if cfg.control.irohSecret == null then "" else cfg.control.irohSecret;
@ -360,114 +456,122 @@ in
''
set -euo pipefail
nbc_url=${lib.escapeShellArg nbcUrlStr}
input=""
explicit_input=${lib.escapeShellArg explicitInputStr}
if [[ -n "$explicit_input" ]]; then
input="$explicit_input"
else
ch=${lib.escapeShellArg channelStr}
if [[ -z "$ch" ]]; then
echo "ec-node: broadcast missing both input and channel" >&2
exit 2
fi
# Note: don't wrap lib.escapeShellArg in double-quotes, otherwise empty strings
# become a literal two-quote token and break discovery.
base=${lib.escapeShellArg fixedHost}
if [[ -z "$base" ]]; then
dev_id=${lib.escapeShellArg deviceId}
if [[ -z "$dev_id" ]]; then
echo "ec-node: missing hdhomerun.host and hdhomerun.deviceId" >&2
if [[ -z "$nbc_url" ]]; then
explicit_input=${lib.escapeShellArg explicitInputStr}
if [[ -n "$explicit_input" ]]; then
input="$explicit_input"
else
ch=${lib.escapeShellArg channelStr}
if [[ -z "$ch" ]]; then
echo "ec-node: broadcast missing input, channel, and nbcUrl" >&2
exit 2
fi
try_ip() {
local ip="$1"
local json id base_url
json="$(curl -fsS --connect-timeout 0.10 --max-time 0.20 "http://$ip/discover.json" 2>/dev/null || true)"
if [[ -z "$json" ]]; then
return 1
fi
id="$(printf '%s' "$json" | jq -r '.DeviceID // empty' 2>/dev/null || true)"
if [[ "$id" != "$dev_id" ]]; then
return 1
fi
base_url="$(printf '%s' "$json" | jq -r '.BaseURL // empty' 2>/dev/null || true)"
if [[ -z "$base_url" ]]; then
base_url="http://$ip"
fi
printf '%s\n' "$base_url"
return 0
}
if ${lib.boolToString cfg.hdhomerun.autoDiscover}; then
# Primary: UDP broadcast discover.
base="$(${cfg.discoveryPackage}/bin/ec-cli discover | jq -r --arg id "$dev_id" '.[] | select(.id == $id) | .base_url // empty' | head -n1 || true)"
# Fallback: probe known neighbors for /discover.json (fast; avoids full /24 scan).
if [[ -z "$base" ]]; then
while read -r ip; do
found="$(try_ip "$ip" || true)"
if [[ -n "$found" ]]; then
base="$found"
break
fi
done < <(ip neigh | awk '{print $1}' | sort -u)
# Note: don't wrap lib.escapeShellArg in double-quotes, otherwise empty strings
# become a literal two-quote token and break discovery.
base=${lib.escapeShellArg fixedHost}
if [[ -z "$base" ]]; then
dev_id=${lib.escapeShellArg deviceId}
if [[ -z "$dev_id" ]]; then
echo "ec-node: missing hdhomerun.host and hdhomerun.deviceId" >&2
exit 2
fi
# Fallback: scan local /24 subnets for /discover.json (slow; worst-case ~50s).
if [[ -z "$base" ]]; then
while read -r cidr; do
ip_addr="''${cidr%/*}"
prefix="''${cidr#*/}"
if [[ "$prefix" != "24" ]]; then
continue
fi
net="''${ip_addr%.*}"
for i in $(seq 1 254); do
found="$(try_ip "$net.$i" || true)"
try_ip() {
local ip="$1"
local json id base_url
json="$(curl -fsS --connect-timeout 0.10 --max-time 0.20 "http://$ip/discover.json" 2>/dev/null || true)"
if [[ -z "$json" ]]; then
return 1
fi
id="$(printf '%s' "$json" | jq -r '.DeviceID // empty' 2>/dev/null || true)"
if [[ "$id" != "$dev_id" ]]; then
return 1
fi
base_url="$(printf '%s' "$json" | jq -r '.BaseURL // empty' 2>/dev/null || true)"
if [[ -z "$base_url" ]]; then
base_url="http://$ip"
fi
printf '%s\n' "$base_url"
return 0
}
if ${lib.boolToString cfg.hdhomerun.autoDiscover}; then
base="$(${cfg.discoveryPackage}/bin/ec-cli discover | jq -r --arg id "$dev_id" '.[] | select(.id == $id) | .base_url // empty' | head -n1 || true)"
if [[ -z "$base" ]]; then
while read -r ip; do
found="$(try_ip "$ip" || true)"
if [[ -n "$found" ]]; then
base="$found"
break
fi
done
if [[ -n "$base" ]]; then
break
fi
done < <(ip -o -4 addr show scope global | awk '{print $4}')
fi
done < <(ip neigh | awk '{print $1}' | sort -u)
fi
if [[ -z "$base" ]]; then
echo "ec-node: HDHomeRun deviceId not found: $dev_id" >&2
exit 2
if [[ -z "$base" ]]; then
while read -r cidr; do
ip_addr="''${cidr%/*}"
prefix="''${cidr#*/}"
if [[ "$prefix" != "24" ]]; then
continue
fi
net="''${ip_addr%.*}"
for i in $(seq 1 254); do
found="$(try_ip "$net.$i" || true)"
if [[ -n "$found" ]]; then
base="$found"
break
fi
done
if [[ -n "$base" ]]; then
break
fi
done < <(ip -o -4 addr show scope global | awk '{print $4}')
fi
if [[ -z "$base" ]]; then
echo "ec-node: HDHomeRun deviceId not found: $dev_id" >&2
exit 2
fi
else
base="http://$dev_id.local"
fi
else
# Best-effort mDNS convention.
base="http://$dev_id.local"
fi
fi
base="''${base%/}"
if [[ "$base" != http://* && "$base" != https://* ]]; then
base="http://$base"
fi
base="''${base%/}"
if [[ "$base" != http://* && "$base" != https://* ]]; then
base="http://$base"
fi
# HDHomeRun streaming is on port 5004, regardless of the discover BaseURL.
hostport="''${base#http://}"
hostport="''${hostport#https://}"
hostport="''${hostport%%/*}"
host="''${hostport%%:*}"
input="http://$host:5004/auto/v$ch"
hostport="''${base#http://}"
hostport="''${hostport#https://}"
hostport="''${hostport%%/*}"
host="''${hostport%%:*}"
input="http://$host:5004/auto/v$ch"
fi
fi
cmd=(
${lib.escapeShellArg "${cfg.package}/bin/ec-node"}
wt-publish
--url ${lib.escapeShellArg cfg.relayUrl}
--name ${lib.escapeShellArg b.name}
--input "$input"
)
${lib.optionalString (!cfg.transcode) "cmd+=(--transcode=false)"}
if [[ -n "$nbc_url" ]]; then
cmd=(
${lib.escapeShellArg "${cfg.package}/bin/ec-node"}
nbc-wt-publish
--url ${lib.escapeShellArg cfg.relayUrl}
--name ${lib.escapeShellArg b.name}
--source-url "$nbc_url"
)
else
cmd=(
${lib.escapeShellArg "${cfg.package}/bin/ec-node"}
wt-publish
--url ${lib.escapeShellArg cfg.relayUrl}
--name ${lib.escapeShellArg b.name}
--input "$input"
)
${lib.optionalString (!cfg.transcode) "cmd+=(--transcode=false)"}
fi
${lib.optionalString (!cfg.passthrough) "cmd+=(--passthrough=false)"}
${lib.optionalString cfg.tlsDisableVerify "cmd+=(--tls-disable-verify)"}
${lib.optionalString cfg.control.enable ''
@ -503,8 +607,12 @@ in
value = {
description = "every.channel WebTransport publish (${b.name} -> ${cfg.relayUrl})";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
after =
[ "network-online.target" ]
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ];
wants =
[ "network-online.target" ]
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ];
# Keep the unit from entering "failed" due to rapid restarts (deploy-flake treats
# failed units during `switch-to-configuration test` as a deployment failure).
@ -521,23 +629,34 @@ in
Restart = "always";
RestartSec = 2;
DynamicUser = true;
DynamicUser = !isNbc;
User = lib.mkIf isNbc "every-channel";
Group = lib.mkIf isNbc "every-channel";
NoNewPrivileges = true;
PrivateTmp = true;
PrivateTmp = !isNbc;
ProtectSystem = "strict";
ProtectHome = true;
ProtectHome = !isNbc;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;
MemoryDenyWriteExecute = !isNbc;
RestrictSUIDSGID = true;
RestrictRealtime = true;
SystemCallArchitectures = "native";
ReadWritePaths = lib.optionals cfg.control.enable [ "/run/every-channel" ];
ReadWritePaths =
lib.optionals cfg.control.enable [ "/run/every-channel" ]
++ lib.optionals isNbc [ cfg.nbc.profileDir cfg.nbc.authScreenshotDir ];
};
environment = cfg.environment;
environment =
cfg.environment
// lib.optionalAttrs isNbc {
DISPLAY = cfg.nbc.display;
EVERY_CHANNEL_NBC_CHROME_PATH = cfg.nbc.chromeBinary;
EVERY_CHANNEL_NBC_PROFILE_DIR = cfg.nbc.profileDir;
EVERY_CHANNEL_NBC_NO_SANDBOX = if cfg.nbc.noSandbox then "1" else "0";
};
};
})
cfg.broadcasts)
@ -852,6 +971,112 @@ in
environment = cfg.environment;
};
});
})
// lib.optionalAttrs cfg.nbc.enable
(let
displayUnit = "every-channel-nbc-display";
displayNumber = lib.strings.removePrefix ":" cfg.nbc.display;
displayRunner = pkgs.writeShellApplication {
name = displayUnit;
runtimeInputs = [
pkgs.coreutils
pkgs.xorg.xorgserver
];
text = ''
set -euo pipefail
exec ${pkgs.xorg.xorgserver}/bin/Xvfb ${lib.escapeShellArg cfg.nbc.display} \
-screen 0 ${lib.escapeShellArg cfg.nbc.screen} \
-nolisten tcp \
-ac \
+extension RANDR
'';
};
vncUnit = "every-channel-nbc-vnc";
vncRunner = pkgs.writeShellApplication {
name = vncUnit;
runtimeInputs = [
pkgs.x11vnc
];
text = ''
set -euo pipefail
exec ${pkgs.x11vnc}/bin/x11vnc \
-display ${lib.escapeShellArg cfg.nbc.display} \
-forever \
-shared \
-nopw \
-listen ${lib.escapeShellArg cfg.nbc.vnc.listen} \
-rfbport ${toString cfg.nbc.vnc.port}
'';
};
in
({
"${displayUnit}" = {
description = "every.channel NBC virtual display";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
serviceConfig = {
Type = "simple";
ExecStart = "${displayRunner}/bin/${displayUnit}";
Restart = "always";
RestartSec = 2;
User = "every-channel";
Group = "every-channel";
NoNewPrivileges = true;
PrivateTmp = false;
ProtectSystem = "strict";
ProtectHome = false;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
LockPersonality = true;
MemoryDenyWriteExecute = false;
RestrictSUIDSGID = true;
RestrictRealtime = true;
SystemCallArchitectures = "native";
ReadWritePaths = [ "/tmp" "/var/lib/every-channel" ];
};
environment = cfg.environment // {
HOME = "/var/lib/every-channel";
};
};
}
// lib.optionalAttrs cfg.nbc.vnc.enable {
"${vncUnit}" = {
description = "every.channel NBC virtual display VNC bridge";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" "${displayUnit}.service" ];
wants = [ "network-online.target" "${displayUnit}.service" ];
serviceConfig = {
Type = "simple";
ExecStart = "${vncRunner}/bin/${vncUnit}";
Restart = "always";
RestartSec = 2;
User = "every-channel";
Group = "every-channel";
NoNewPrivileges = true;
PrivateTmp = false;
ProtectSystem = "strict";
ProtectHome = false;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
LockPersonality = true;
MemoryDenyWriteExecute = false;
RestrictSUIDSGID = true;
RestrictRealtime = true;
SystemCallArchitectures = "native";
ReadWritePaths = [ "/tmp" "/var/lib/every-channel" ];
};
environment = cfg.environment // {
DISPLAY = cfg.nbc.display;
HOME = "/var/lib/every-channel";
};
};
}));
};
}

View file

@ -11,6 +11,12 @@ in
./ecp-forge-hardware.nix
];
nixpkgs.config.allowUnfreePredicate = pkg:
builtins.elem (lib.getName pkg) [
"google-chrome"
"google-chrome-stable"
];
networking = {
hostName = "ecp-forge";
hostId = "007f0200";
@ -252,6 +258,24 @@ in
services.every-channel.ec-node = {
enable = true;
nbc = {
enable = true;
chromeBinary = "${pkgs.google-chrome}/bin/google-chrome-stable";
display = ":120";
screen = "1920x1080x24";
noSandbox = true;
vnc = {
enable = true;
listen = "127.0.0.1";
port = 5900;
};
};
broadcasts = [
{
name = "forge-nbc-sports-philly";
nbcUrl = "https://www.nbc.com/live?brand=nbc-sports-philadelphia";
}
];
archive = {
enable = true;
outputDir = "/tank/every-channel/archive";
@ -261,6 +285,25 @@ in
};
};
services.every-channel.ethereum = {
enable = true;
poolName = "eth";
poolDevice = "/dev/disk/by-id/nvme-eui.01000000000000008ce38ee307de5c01";
rootDir = "/eth";
publicIp = "95.216.114.54";
publicHost = "eth.every.channel";
};
services.mullvad-vpn = {
enable = true;
enableExcludeWrapper = true;
};
systemd.services.every-channel-wt-publish-forge-nbc-sports-philly = {
after = [ "mullvad-daemon.service" ];
wants = [ "mullvad-daemon.service" ];
};
services.every-channel.op-stack = {
enable = hasOpStackSepoliaKey;
challengerEnable = hasOpStackChallengerPrestate;
@ -276,13 +319,20 @@ in
p2pAdvertiseIp = "95.216.114.54";
};
environment.systemPackages = with pkgs; [
git
htop
jq
tmux
zfs
];
environment.systemPackages =
(with pkgs; [
git
google-chrome
htop
jq
mullvad-vpn
tmux
x11vnc
zfs
])
++ [
config.services.every-channel.ec-node.package
];
system.stateVersion = "22.11";
}

View file

@ -12,7 +12,17 @@ let
let
base = baseNameOf path;
in
!(base == "target" || base == ".git" || base == ".direnv" || base == "tmp" || base == "node_modules");
!(base == "target"
|| base == ".git"
|| base == ".direnv"
|| base == "tmp"
|| base == "node_modules"
|| base == "out"
|| base == "test-results"
|| base == "deploy"
|| base == "intake"
|| base == "cache"
|| base == ".tower-minimal");
};
in
rustPlatform.buildRustPackage {

View file

@ -1,5 +1,6 @@
{ lib
, rustPlatform
, rustfmt
, stdenv
, pkg-config
, openssl
@ -14,7 +15,17 @@ let
base = baseNameOf path;
in
# Skip typical build outputs and large scratch dirs.
!(base == "target" || base == ".git" || base == ".direnv" || base == "tmp" || base == "node_modules");
!(base == "target"
|| base == ".git"
|| base == ".direnv"
|| base == "tmp"
|| base == "node_modules"
|| base == "out"
|| base == "test-results"
|| base == "deploy"
|| base == "intake"
|| base == "cache"
|| base == ".tower-minimal");
};
in
rustPlatform.buildRustPackage {
@ -30,6 +41,7 @@ rustPlatform.buildRustPackage {
nativeBuildInputs = [
pkg-config
rustfmt
];
buildInputs =