Wire HDHomeRun observations and recover Forge OP Stack

This commit is contained in:
every.channel 2026-05-03 20:24:04 -07:00
parent 8065860449
commit 0d86104762
No known key found for this signature in database
18 changed files with 1613 additions and 58 deletions

View file

@ -17,6 +17,8 @@ pub const SCHEME_MANIFEST_DATA_ROOT: &str = "manifest-data-merkle-keccak256-v1";
pub const SCHEME_MANIFEST_BODY_ABI: &str = "manifest-body-abi-keccak256-v1"; pub const SCHEME_MANIFEST_BODY_ABI: &str = "manifest-body-abi-keccak256-v1";
pub const SCHEME_MANIFEST_ENVELOPE_ABI: &str = "manifest-envelope-abi-keccak256-v1"; pub const SCHEME_MANIFEST_ENVELOPE_ABI: &str = "manifest-envelope-abi-keccak256-v1";
pub const ETH_MANIFEST_SIG_ALG: &str = "secp256k1-eip712-manifest-body-v1"; pub const ETH_MANIFEST_SIG_ALG: &str = "secp256k1-eip712-manifest-body-v1";
/// All-zero 32-byte hash, used as the parent-observation sentinel for the
/// first observation in a chain (see `manifest_observation_header`).
pub const ZERO_B256_HEX: &str =
"0x0000000000000000000000000000000000000000000000000000000000000000";
sol! { sol! {
struct EthStreamMetadata { struct EthStreamMetadata {
@ -117,6 +119,16 @@ sol! {
bytes32 manifestId; bytes32 manifestId;
EthManifestSignature[] signatures; EthManifestSignature[] signatures;
} }
// ABI struct describing one observed manifest epoch. Hashed (keccak256 of
// the ABI encoding) to form the observation hash submitted on-chain.
struct EthObservationHeader {
// keccak256 of the stream id string.
bytes32 streamHash;
// keccak256 of the epoch id string.
bytes32 epochHash;
// Hash of the previous observation, or all-zero for the first one.
bytes32 parentObservationHash;
// Manifest data-merkle root commitment digest.
bytes32 dataRoot;
// keccak256 of the JSON-encoded locator describing where the data lives.
bytes32 locatorHash;
// Manifest creation time (unix epoch milliseconds).
uint64 observedUnixMs;
// Monotonic sequence number supplied by the publisher.
uint64 sequence;
}
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -142,7 +154,7 @@ fn commitment(scheme: &str, digest: B256) -> ChainCommitment {
ChainCommitment { ChainCommitment {
chain: ETHEREUM_CHAIN.to_string(), chain: ETHEREUM_CHAIN.to_string(),
scheme: scheme.to_string(), scheme: scheme.to_string(),
digest: format!("0x{}", hex::encode(digest)), digest: b256_hex(digest),
} }
} }
@ -150,7 +162,15 @@ fn abi_commitment<T: SolValue>(scheme: &str, value: &T) -> ChainCommitment {
commitment(scheme, keccak256(value.abi_encode())) commitment(scheme, keccak256(value.abi_encode()))
} }
fn parse_b256(value: &str) -> Result<B256, EthCommitmentError> { pub fn b256_hex(value: B256) -> String {
format!("0x{}", hex::encode(value))
}
pub fn keccak256_bytes_hex(value: &[u8]) -> String {
b256_hex(keccak256(value))
}
pub fn parse_b256(value: &str) -> Result<B256, EthCommitmentError> {
let trimmed = value.trim().strip_prefix("0x").unwrap_or(value.trim()); let trimmed = value.trim().strip_prefix("0x").unwrap_or(value.trim());
let bytes = let bytes =
hex::decode(trimmed).map_err(|_| EthCommitmentError::InvalidHex(value.to_string()))?; hex::decode(trimmed).map_err(|_| EthCommitmentError::InvalidHex(value.to_string()))?;
@ -540,6 +560,49 @@ pub fn manifest_commitments(value: &Manifest) -> Result<Vec<ChainCommitment>, Et
]) ])
} }
/// Returns the digest of the commitment with the given `scheme`, if the
/// manifest produces one.
///
/// # Errors
/// Propagates any error from computing `manifest_commitments`.
pub fn manifest_commitment_digest(
    value: &Manifest,
    scheme: &str,
) -> Result<Option<String>, EthCommitmentError> {
    let commitments = manifest_commitments(value)?;
    Ok(commitments.into_iter().find_map(|entry| {
        if entry.scheme == scheme {
            Some(entry.digest)
        } else {
            None
        }
    }))
}
/// Derives the on-chain observation header for a manifest.
///
/// `parent_observation_hash` defaults to the all-zero sentinel
/// (`ZERO_B256_HEX`) when `None`, marking the first observation of a chain.
/// `locator_hash` must be a 0x-prefixed 32-byte hex string.
///
/// # Errors
/// Returns `EthCommitmentError::Empty` when the manifest has no data-root
/// commitment, and hex/length errors from `parse_b256` for malformed hashes.
pub fn manifest_observation_header(
value: &Manifest,
parent_observation_hash: Option<&str>,
locator_hash: &str,
sequence: u64,
) -> Result<EthObservationHeader, EthCommitmentError> {
// The data root is taken from the manifest's own commitment list so the
// header always matches what the manifest advertises.
let data_root = manifest_commitment_digest(value, SCHEME_MANIFEST_DATA_ROOT)?
.ok_or(EthCommitmentError::Empty)?;
Ok(EthObservationHeader {
streamHash: keccak256(value.body.stream_id.0.as_bytes()),
epochHash: keccak256(value.body.epoch_id.as_bytes()),
parentObservationHash: parse_b256(parent_observation_hash.unwrap_or(ZERO_B256_HEX))?,
dataRoot: parse_b256(&data_root)?,
locatorHash: parse_b256(locator_hash)?,
observedUnixMs: value.body.created_unix_ms,
sequence,
})
}
/// Hashes an observation header: keccak256 over its ABI encoding, returned
/// as a 0x-prefixed hex string.
pub fn observation_header_hash(value: &EthObservationHeader) -> String {
    let encoded = value.abi_encode();
    b256_hex(keccak256(encoded))
}
/// Computes the slot hash for a (stream, epoch) pair: keccak256 of the
/// ABI-encoded hash tuple, as a 0x-prefixed hex string.
///
/// # Errors
/// Fails when either input is not a valid 0x-prefixed 32-byte hex string.
pub fn observation_slot_hash(
    stream_hash: &str,
    epoch_hash: &str,
) -> Result<String, EthCommitmentError> {
    let pair = (parse_b256(stream_hash)?, parse_b256(epoch_hash)?);
    Ok(b256_hex(keccak256(pair.abi_encode())))
}
pub fn manifest_commitments_match(value: &Manifest) -> Result<bool, EthCommitmentError> { pub fn manifest_commitments_match(value: &Manifest) -> Result<bool, EthCommitmentError> {
let present = value let present = value
.commitments .commitments
@ -621,6 +684,37 @@ mod tests {
assert_eq!(h1, h2); assert_eq!(h1, h2);
} }
#[test]
fn manifest_observation_header_uses_manifest_data_root() {
// Build a minimal signed-less manifest and attach its own commitments.
let body = sample_body();
let manifest_id = body.manifest_id().unwrap();
let mut manifest = Manifest {
body,
manifest_id,
signatures: Vec::new(),
commitments: Vec::new(),
};
manifest.commitments = manifest_commitments(&manifest).unwrap();
let locator_hash = keccak256_bytes_hex(b"locator");
let header = manifest_observation_header(&manifest, None, &locator_hash, 7).unwrap();
// Sequence and timestamp are passed through untouched.
assert_eq!(header.sequence, 7);
assert_eq!(header.observedUnixMs, manifest.body.created_unix_ms);
// The header's data root must be exactly the manifest's own
// data-root commitment digest.
assert_eq!(
b256_hex(header.dataRoot),
manifest_commitment_digest(&manifest, SCHEME_MANIFEST_DATA_ROOT)
.unwrap()
.unwrap()
);
// Slot hash derived from hex strings matches hashing the raw tuple.
assert_eq!(
observation_slot_hash(&b256_hex(header.streamHash), &b256_hex(header.epochHash))
.unwrap(),
b256_hex(keccak256(
(header.streamHash, header.epochHash).abi_encode()
))
);
}
#[test] #[test]
fn stream_descriptor_commitments_include_stream_id_and_descriptor_hashes() { fn stream_descriptor_commitments_include_stream_id_and_descriptor_hashes() {
let descriptor = StreamDescriptor { let descriptor = StreamDescriptor {

View file

@ -0,0 +1,220 @@
use anyhow::{anyhow, Context, Result};
use ec_core::Manifest;
use ec_eth::{
b256_hex, keccak256_bytes_hex, manifest_observation_header, observation_header_hash,
observation_slot_hash,
};
use serde::Serialize;
use std::fs;
use std::path::PathBuf;
use std::time::Duration;
use tokio::process::Command;
// Environment fallbacks for the --observation-* CLI flags (see
// ObservationSink::from_options).
// RPC endpoint of the chain hosting the observation ledger.
pub const OBSERVATION_RPC_URL_ENV: &str = "EVERY_CHANNEL_OBSERVATION_RPC_URL";
// Deployed observation ledger contract address.
pub const OBSERVATION_LEDGER_ENV: &str = "EVERY_CHANNEL_OBSERVATION_LEDGER";
// Witness private key as inline hex (prefer the *_FILE variant for services).
pub const OBSERVATION_PRIVATE_KEY_ENV: &str = "EVERY_CHANNEL_OBSERVATION_PRIVATE_KEY";
// Path to a file containing the witness private key.
pub const OBSERVATION_PRIVATE_KEY_FILE_ENV: &str = "EVERY_CHANNEL_OBSERVATION_PRIVATE_KEY_FILE";
// Optional parent observation hash to chain new headers onto.
pub const OBSERVATION_PARENT_HASH_ENV: &str = "EVERY_CHANNEL_OBSERVATION_PARENT_HASH";
/// Raw CLI-level options for constructing an `ObservationSink`.
///
/// Every field except `timeout_ms` may be `None`; environment variables are
/// consulted as fallbacks in `ObservationSink::from_options`.
#[derive(Debug, Clone)]
pub struct ObservationSinkOptions {
pub rpc_url: Option<String>,
pub ledger: Option<String>,
pub private_key: Option<String>,
pub private_key_file: Option<PathBuf>,
pub parent_hash: Option<String>,
// Per-transaction timeout; clamped to a 1s minimum when the sink is built.
pub timeout_ms: u64,
}
/// Fully-resolved configuration for submitting observation headers to the
/// on-chain ledger via `cast send`.
#[derive(Debug, Clone)]
pub struct ObservationSink {
rpc_url: String,
ledger: String,
// Normalized 0x-prefixed 32-byte hex key.
private_key: String,
// Parent observation hash for chaining; None means the zero sentinel.
parent_hash: Option<String>,
timeout: Duration,
}
/// Where an observed manifest can be fetched. Serialized to JSON and
/// committed on-chain only as its keccak256 hash.
#[derive(Debug, Clone, Serialize)]
pub struct ManifestObservationLocator {
pub transport: String,
pub broadcast_name: String,
pub track_name: String,
pub manifest_track: String,
pub stream_id: String,
pub epoch_id: String,
pub manifest_id: String,
}
/// Hashes describing an observation that was successfully submitted,
/// suitable for logging and later on-chain lookups by slot hash.
#[derive(Debug, Clone)]
pub struct SubmittedObservation {
pub observation_hash: String,
pub slot_hash: String,
pub stream_hash: String,
pub epoch_hash: String,
pub data_root: String,
pub locator_hash: String,
pub sequence: u64,
}
/// Reads an environment variable, trimming whitespace and treating unset,
/// unreadable, or blank values uniformly as `None`.
fn env_value(name: &str) -> Option<String> {
    match std::env::var(name) {
        Ok(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        }
        Err(_) => None,
    }
}
/// Normalizes a private key to canonical `0x`-prefixed lowercase hex.
///
/// Accepts plain or `0x`-prefixed hex, with surrounding whitespace.
///
/// # Errors
/// Fails when the input is not valid hex or does not decode to 32 bytes.
fn normalize_private_key(value: &str) -> Result<String> {
    let trimmed = value.trim();
    let hex_part = trimmed.strip_prefix("0x").unwrap_or(trimmed);
    let bytes = hex::decode(hex_part).context("observation private key must be hex")?;
    match bytes.len() {
        32 => Ok(format!("0x{}", hex::encode(bytes))),
        _ => Err(anyhow!("observation private key must be 32 bytes")),
    }
}
/// Reads and normalizes a hex private key from `path`.
///
/// Takes `&Path` rather than `&PathBuf` so any path-like borrow works;
/// existing `&PathBuf` call sites keep working via deref coercion.
///
/// # Errors
/// Fails when the file cannot be read or its contents are not a valid
/// 32-byte hex key (see `normalize_private_key`).
fn read_private_key_file(path: &std::path::Path) -> Result<String> {
    fs::read_to_string(path)
        .with_context(|| format!("failed to read observation private key {}", path.display()))
        .and_then(|value| normalize_private_key(&value))
}
impl ObservationSink {
pub fn from_options(options: ObservationSinkOptions) -> Result<Option<Self>> {
let rpc_url = options
.rpc_url
.or_else(|| env_value(OBSERVATION_RPC_URL_ENV));
let ledger = options.ledger.or_else(|| env_value(OBSERVATION_LEDGER_ENV));
let private_key = options
.private_key
.or_else(|| env_value(OBSERVATION_PRIVATE_KEY_ENV));
let private_key_file = options
.private_key_file
.or_else(|| env_value(OBSERVATION_PRIVATE_KEY_FILE_ENV).map(PathBuf::from));
let parent_hash = options
.parent_hash
.or_else(|| env_value(OBSERVATION_PARENT_HASH_ENV));
if rpc_url.is_none()
&& ledger.is_none()
&& private_key.is_none()
&& private_key_file.is_none()
{
return Ok(None);
}
let rpc_url = rpc_url.ok_or_else(|| {
anyhow!(
"set --observation-rpc-url or {} for observation submission",
OBSERVATION_RPC_URL_ENV
)
})?;
let ledger = ledger.ok_or_else(|| {
anyhow!(
"set --observation-ledger or {} for observation submission",
OBSERVATION_LEDGER_ENV
)
})?;
let private_key = match (private_key, private_key_file) {
(Some(value), _) => normalize_private_key(&value)?,
(None, Some(path)) => read_private_key_file(&path)?,
(None, None) => {
return Err(anyhow!(
"set --observation-private-key, --observation-private-key-file, {}, or {}",
OBSERVATION_PRIVATE_KEY_ENV,
OBSERVATION_PRIVATE_KEY_FILE_ENV
))
}
};
Ok(Some(Self {
rpc_url,
ledger,
private_key,
parent_hash,
timeout: Duration::from_millis(options.timeout_ms.max(1_000)),
}))
}
pub async fn submit_manifest(
&self,
manifest: &Manifest,
locator: ManifestObservationLocator,
) -> Result<SubmittedObservation> {
let locator_json =
serde_json::to_string(&locator).context("failed to encode observation locator")?;
let locator_hash = keccak256_bytes_hex(locator_json.as_bytes());
let sequence = manifest.body.chunk_start_index;
let header = manifest_observation_header(
manifest,
self.parent_hash.as_deref(),
&locator_hash,
sequence,
)
.map_err(|err| anyhow!(err))?;
let stream_hash = b256_hex(header.streamHash);
let epoch_hash = b256_hex(header.epochHash);
let data_root = b256_hex(header.dataRoot);
let observation_hash = observation_header_hash(&header);
let slot_hash =
observation_slot_hash(&stream_hash, &epoch_hash).map_err(|err| anyhow!(err))?;
let tuple = format!(
"({},{},{},{},{},{},{})",
stream_hash,
epoch_hash,
b256_hex(header.parentObservationHash),
data_root,
locator_hash,
header.observedUnixMs,
header.sequence
);
let mut cmd = Command::new("cast");
cmd.arg("send")
.arg(&self.ledger)
.arg("proposeObservation((bytes32,bytes32,bytes32,bytes32,bytes32,uint64,uint64))")
.arg(&tuple)
.arg("--rpc-url")
.arg(&self.rpc_url)
.arg("--private-key")
.arg(&self.private_key);
let output = tokio::time::timeout(self.timeout, cmd.output())
.await
.context("timed out submitting observation")?
.context("failed to run cast for observation submission")?;
if !output.status.success() {
return Err(anyhow!(
"cast observation submission failed: {}",
String::from_utf8_lossy(&output.stderr).trim()
));
}
Ok(SubmittedObservation {
observation_hash,
slot_hash,
stream_hash,
epoch_hash,
data_root,
locator_hash,
sequence,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
// normalize_private_key must canonicalize both plain and 0x-prefixed hex
// to the 0x-prefixed form, and reject short keys.
#[test]
fn normalize_private_key_accepts_prefixed_and_plain_hex() {
let plain = "11".repeat(32);
assert_eq!(normalize_private_key(&plain).unwrap(), format!("0x{plain}"));
assert_eq!(
normalize_private_key(&format!("0x{plain}")).unwrap(),
format!("0x{plain}")
);
// 2 bytes of hex is not a 32-byte key.
assert!(normalize_private_key("abcd").is_err());
}
}

View file

@ -1,10 +1,12 @@
//! Node runner: orchestrates ingest, chunking, and MoQ publication. //! Node runner: orchestrates ingest, chunking, and MoQ publication.
mod blockchain;
mod nbc; mod nbc;
mod source; mod source;
use anyhow::{anyhow, Context, Result}; use anyhow::{anyhow, Context, Result};
use blake3; use blake3;
use blockchain::{ManifestObservationLocator, ObservationSink, ObservationSinkOptions};
use clap::ValueEnum; use clap::ValueEnum;
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
use ec_chopper::{build_manifest_body_for_chunks, TsChunk}; use ec_chopper::{build_manifest_body_for_chunks, TsChunk};
@ -216,6 +218,24 @@ struct MoqPublishArgs {
/// Publish a CMAF ladder (multiple quality variants) using x264/AAC. /// Publish a CMAF ladder (multiple quality variants) using x264/AAC.
#[arg(long, value_enum)] #[arg(long, value_enum)]
cmaf_ladder: Option<CmafLadderPreset>, cmaf_ladder: Option<CmafLadderPreset>,
/// RPC URL for submitting manifest-derived observation headers.
#[arg(long)]
observation_rpc_url: Option<String>,
/// Deployed EveryChannelObservationLedger address.
#[arg(long)]
observation_ledger: Option<String>,
/// Witness private key for observation transactions (hex). Prefer --observation-private-key-file for services.
#[arg(long)]
observation_private_key: Option<String>,
/// File containing the witness private key for observation transactions.
#[arg(long)]
observation_private_key_file: Option<PathBuf>,
/// Parent observation hash to put into new headers.
#[arg(long)]
observation_parent_hash: Option<String>,
/// Maximum time to wait for each observation transaction.
#[arg(long, default_value_t = 30000)]
observation_timeout_ms: u64,
#[command(subcommand)] #[command(subcommand)]
source: IngestSource, source: IngestSource,
} }
@ -1224,6 +1244,40 @@ fn build_manifest(
Ok(manifest) Ok(manifest)
} }
/// Submits an observation for `manifest` through `sink`, logging the result
/// to stderr. A `None` sink means observation submission is disabled and
/// the call is a no-op.
async fn submit_manifest_observation(
    sink: Option<&ObservationSink>,
    manifest: &Manifest,
    broadcast_name: &str,
    track_name: &str,
    manifest_track: &str,
) -> Result<()> {
    let sink = match sink {
        Some(sink) => sink,
        // No sink configured: nothing to submit.
        None => return Ok(()),
    };
    // Describe where this manifest was published so verifiers can fetch it.
    let locator = ManifestObservationLocator {
        transport: "moq-publish".to_string(),
        broadcast_name: broadcast_name.to_string(),
        track_name: track_name.to_string(),
        manifest_track: manifest_track.to_string(),
        stream_id: manifest.body.stream_id.0.clone(),
        epoch_id: manifest.body.epoch_id.clone(),
        manifest_id: manifest.manifest_id.clone(),
    };
    let submitted = sink.submit_manifest(manifest, locator).await?;
    // Machine-parseable line consumed by the e2e test (`parse_field`).
    eprintln!(
        "observation submitted: observation_hash={} slot_hash={} stream_hash={} epoch_hash={} data_root={} locator_hash={} sequence={}",
        submitted.observation_hash,
        submitted.slot_hash,
        submitted.stream_hash,
        submitted.epoch_hash,
        submitted.data_root,
        submitted.locator_hash,
        submitted.sequence
    );
    Ok(())
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct CmafVariantSpec { struct CmafVariantSpec {
id: String, id: String,
@ -1626,9 +1680,9 @@ fn flush_epoch_publish(
object_sequence: &mut u64, object_sequence: &mut u64,
manifest_sequence: &mut u64, manifest_sequence: &mut u64,
announce_tx: Option<&tokio::sync::mpsc::UnboundedSender<ManifestSummary>>, announce_tx: Option<&tokio::sync::mpsc::UnboundedSender<ManifestSummary>>,
) -> Result<()> { ) -> Result<Option<Manifest>> {
if epoch_buffer.is_empty() { if epoch_buffer.is_empty() {
return Ok(()); return Ok(None);
} }
let (chunks, datas, hashes) = epoch_buffer.take(); let (chunks, datas, hashes) = epoch_buffer.take();
@ -1642,6 +1696,7 @@ fn flush_epoch_publish(
.as_millis() as u64; .as_millis() as u64;
let mut manifest_id = None; let mut manifest_id = None;
let mut published_manifest = None;
if publish_manifests { if publish_manifests {
let manifest = build_manifest( let manifest = build_manifest(
stream_id_value.clone(), stream_id_value.clone(),
@ -1662,6 +1717,7 @@ fn flush_epoch_publish(
if let Some(tx) = announce_tx { if let Some(tx) = announce_tx {
let _ = tx.send(manifest.summary()); let _ = tx.send(manifest.summary());
} }
published_manifest = Some(manifest);
} }
// Compute per-chunk Merkle proofs so subscribers can validate membership // Compute per-chunk Merkle proofs so subscribers can validate membership
@ -1703,7 +1759,7 @@ fn flush_epoch_publish(
} }
} }
Ok(()) Ok(published_manifest)
} }
#[cfg(test)] #[cfg(test)]
@ -1991,6 +2047,15 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
fs::create_dir_all(&args.chunk_dir) fs::create_dir_all(&args.chunk_dir)
.with_context(|| format!("failed to create {}", args.chunk_dir.display()))?; .with_context(|| format!("failed to create {}", args.chunk_dir.display()))?;
let observation_sink = ObservationSink::from_options(ObservationSinkOptions {
rpc_url: args.observation_rpc_url.clone(),
ledger: args.observation_ledger.clone(),
private_key: args.observation_private_key.clone(),
private_key_file: args.observation_private_key_file.clone(),
parent_hash: args.observation_parent_hash.clone(),
timeout_ms: args.observation_timeout_ms,
})?;
let deterministic = deterministic_enabled(args.deterministic); let deterministic = deterministic_enabled(args.deterministic);
let (source, _needs_transcode): (Box<dyn StreamSource + Send>, bool) = match args.source { let (source, _needs_transcode): (Box<dyn StreamSource + Send>, bool) = match args.source {
IngestSource::Hls { url, mut mode } => { IngestSource::Hls { url, mut mode } => {
@ -2451,6 +2516,14 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
manifest, manifest,
} => { } => {
publish_set.publish_manifest(&track, sequence, &manifest)?; publish_set.publish_manifest(&track, sequence, &manifest)?;
submit_manifest_observation(
observation_sink.as_ref(),
&manifest,
&broadcast_name,
&track_name,
&track,
)
.await?;
} }
} }
} }
@ -2673,7 +2746,7 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
PendingKind::Segment => { PendingKind::Segment => {
epoch_buffer.push(pending.chunk, pending.data, pending.hash); epoch_buffer.push(pending.chunk, pending.data, pending.hash);
if epoch_buffer.is_full() { if epoch_buffer.is_full() {
flush_epoch_publish( if let Some(manifest) = flush_epoch_publish(
&mut publish_set, &mut publish_set,
&track_name, &track_name,
&args.manifest_track, &args.manifest_track,
@ -2690,7 +2763,16 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
&mut object_sequence, &mut object_sequence,
&mut manifest_sequence, &mut manifest_sequence,
announce_tx.as_ref(), announce_tx.as_ref(),
)?; )? {
submit_manifest_observation(
observation_sink.as_ref(),
&manifest,
&broadcast_name,
&track_name,
&args.manifest_track,
)
.await?;
}
} }
} }
} }
@ -2701,7 +2783,7 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
.await .await
.map_err(|err| anyhow!("chunk task join error: {err}"))??; .map_err(|err| anyhow!("chunk task join error: {err}"))??;
flush_epoch_publish( if let Some(manifest) = flush_epoch_publish(
&mut publish_set, &mut publish_set,
&track_name, &track_name,
&args.manifest_track, &args.manifest_track,
@ -2718,7 +2800,16 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
&mut object_sequence, &mut object_sequence,
&mut manifest_sequence, &mut manifest_sequence,
announce_tx.as_ref(), announce_tx.as_ref(),
)?; )? {
submit_manifest_observation(
observation_sink.as_ref(),
&manifest,
&broadcast_name,
&track_name,
&args.manifest_track,
)
.await?;
}
Ok(()) Ok(())
} }

View file

@ -0,0 +1,275 @@
use std::fs;
use std::io::{BufRead, BufReader};
use std::process::{Child, Command, Stdio};
use std::time::{Duration, Instant};
// Anvil's well-known deterministic dev accounts: index 0 funds deployment
// and contract ownership, index 1 acts as the observation witness.
const ANVIL_PK0: &str = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
const ANVIL_PK1: &str = "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d";
/// Reads an environment variable, returning `None` when it is unset,
/// unreadable, or blank after trimming.
fn env_required(key: &str) -> Option<String> {
    let raw = std::env::var(key).ok()?;
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    }
}
/// Heuristic: does this lineup tag / field text suggest a DRM-protected
/// channel? Case-insensitive substring match against known DRM hints.
fn looks_drm(value: &str) -> bool {
    const HINTS: [&str; 5] = ["drm", "encrypted", "protected", "copy", "widevine"];
    let lowered = value.to_lowercase();
    HINTS.iter().any(|hint| lowered.contains(hint))
}
/// Discovers the first reachable HDHomeRun and picks the first lineup entry
/// with a channel number and no DRM hints in its tags or raw JSON payload.
///
/// Returns `(host_ip, channel_number)`, or `None` when discovery, lineup
/// fetch, or the filter finds nothing usable.
fn autodiscover_hdhr_host_and_channel() -> Option<(String, String)> {
    let devices = ec_hdhomerun::discover().ok()?;
    let device = devices.into_iter().next()?;
    let lineup = ec_hdhomerun::fetch_lineup(&device).ok()?;
    let entry = lineup.iter().find(|e| {
        let tag_drm = e.tags.iter().any(|t| looks_drm(t));
        let raw_drm = e
            .raw
            .as_object()
            .map(|obj| {
                obj.iter()
                    .any(|(k, v)| looks_drm(k) || looks_drm(&v.to_string()))
            })
            .unwrap_or(false);
        !tag_drm && !raw_drm && e.channel.number.as_deref().unwrap_or("").trim() != ""
    })?;
    let host = device.ip.clone();
    // The filter above guarantees `number` is set; fall back to the channel
    // name anyway so a future filter change cannot break this. (The old
    // chained `"2.1"` default was unreachable — `or_else` always produced
    // `Some` — and has been removed.)
    let channel = entry
        .channel
        .number
        .clone()
        .unwrap_or_else(|| entry.channel.name.clone());
    Some((host, channel))
}
/// Resolves the ec-node binary: explicit `EC_NODE_BIN` override first, then
/// the Cargo-provided bin-exe env vars (both underscore and hyphen naming),
/// finally a sibling `ec-node` next to the test executable's target dir.
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    // Test binaries live in target/debug/deps; the built binary is one
    // directory up in target/debug.
    let exe = std::env::current_exe().expect("current_exe");
    let debug_dir = exe
        .parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps");
    debug_dir.join("ec-node")
}
/// Returns the workspace root: two directory levels above this crate's
/// `CARGO_MANIFEST_DIR`.
fn repo_root() -> std::path::PathBuf {
    let manifest_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
    let root = manifest_dir
        .parent()
        .and_then(|p| p.parent())
        .expect("workspace root");
    root.to_path_buf()
}
/// Checks that every external binary this test shells out to is on PATH.
fn require_tools() -> bool {
    for tool in ["anvil", "cast", "forge", "ffmpeg"] {
        if which::which(tool).is_err() {
            return false;
        }
    }
    true
}
/// Polls `cast block-number` against `rpc_url` every 250ms until it
/// succeeds, panicking if anvil is not ready within 20 seconds.
fn wait_for_anvil(rpc_url: &str) {
    let deadline = Instant::now() + Duration::from_secs(20);
    loop {
        if Instant::now() >= deadline {
            panic!("anvil did not become ready");
        }
        let ready = Command::new("cast")
            .arg("block-number")
            .arg("--rpc-url")
            .arg(rpc_url)
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status()
            .map(|status| status.success())
            .unwrap_or(false);
        if ready {
            return;
        }
        std::thread::sleep(Duration::from_millis(250));
    }
}
/// RAII wrapper that kills and reaps a spawned child process on drop, so
/// background helpers (anvil) do not outlive the test.
struct ChildGuard {
    child: Option<Child>,
}
impl ChildGuard {
    /// Takes ownership of a spawned child.
    fn new(child: Child) -> Self {
        Self { child: Some(child) }
    }
}
impl Drop for ChildGuard {
    fn drop(&mut self) {
        // Best-effort: errors are ignored (the process may already be gone).
        if let Some(mut child) = self.child.take() {
            let _ = child.kill();
            let _ = child.wait();
        }
    }
}
/// Extracts `value` from the first whitespace-separated `field=value` token
/// in `line`, or `None` when no token carries that field prefix.
fn parse_field(line: &str, field: &str) -> Option<String> {
    let prefix = format!("{field}=");
    for token in line.split_whitespace() {
        if let Some(value) = token.strip_prefix(&prefix) {
            return Some(value.to_string());
        }
    }
    None
}
// End-to-end: spin up a local anvil chain, deploy the observation
// registry + ledger, run one HDHomeRun publish epoch with observation
// submission enabled, and verify the observation finalizes on-chain.
// Ignored by default: needs anvil/cast/forge/ffmpeg and a reachable tuner.
#[test]
#[ignore]
fn e2e_hdhr_manifest_observation_finalizes_on_anvil() {
// Silently skip when the external tools are missing.
if !require_tools() {
return;
}
// Tuner host/channel from env, falling back to LAN autodiscovery; skip
// the test entirely when neither is available.
let host = env_required("EVERY_CHANNEL_E2E_HDHR_HOST");
let channel = env_required("EVERY_CHANNEL_E2E_HDHR_CHANNEL");
let (host, channel) = match (host, channel) {
(Some(host), Some(channel)) => (host, channel),
_ => match autodiscover_hdhr_host_and_channel() {
Some(v) => v,
None => return,
},
};
// Millisecond timestamp salts the temp dir, broadcast name, and port so
// concurrent runs do not collide.
let ts = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_millis();
let tmp = std::env::temp_dir().join(format!("ec-e2e-hdhr-chain-{ts}"));
fs::create_dir_all(&tmp).unwrap();
let port = 18_545 + (ts % 10_000) as u16;
let rpc_url = format!("http://127.0.0.1:{port}");
// Start anvil in the background; ChildGuard kills it when the test ends.
let anvil_log = fs::File::create(tmp.join("anvil.log")).unwrap();
let _anvil = ChildGuard::new(
Command::new("anvil")
.arg("--port")
.arg(port.to_string())
.stdout(anvil_log)
.stderr(Stdio::null())
.spawn()
.expect("failed to spawn anvil"),
);
wait_for_anvil(&rpc_url);
// Deploy registry + ledger with quorum 1 using the owner dev account.
let owner_file = tmp.join("owner.key");
fs::write(&owner_file, ANVIL_PK0).unwrap();
let deploy_json = tmp.join("observation-ledger-deploy.json");
let deploy = Command::new(repo_root().join("scripts/op-stack/deploy-observation-ledger.sh"))
.current_dir(repo_root())
.env("EVERY_CHANNEL_RPC_URL", &rpc_url)
.env("EVERY_CHANNEL_PRIVATE_KEY_FILE", &owner_file)
.env("EVERY_CHANNEL_OBSERVATION_QUORUM", "1")
.env("EVERY_CHANNEL_OBSERVATION_DEPLOY_OUT", &deploy_json)
.stdout(Stdio::null())
.stderr(Stdio::inherit())
.status()
.expect("failed to deploy observation ledger");
assert!(deploy.success(), "deploy failed with {deploy}");
// The deploy script writes the contract addresses as JSON.
let deploy_value: serde_json::Value =
serde_json::from_slice(&fs::read(&deploy_json).unwrap()).unwrap();
let registry = deploy_value["registry"].as_str().unwrap();
let ledger = deploy_value["ledger"].as_str().unwrap();
// Register the second dev account as a witness on the registry.
let witness = Command::new("cast")
.arg("wallet")
.arg("address")
.arg("--private-key")
.arg(ANVIL_PK1)
.output()
.expect("failed to derive witness address");
assert!(witness.status.success());
let witness = String::from_utf8(witness.stdout).unwrap();
let witness = witness.trim();
let add_witness = Command::new("cast")
.arg("send")
.arg(registry)
.arg("addWitness(address)")
.arg(witness)
.arg("--rpc-url")
.arg(&rpc_url)
.arg("--private-key")
.arg(ANVIL_PK0)
.stdout(Stdio::null())
.stderr(Stdio::inherit())
.status()
.expect("failed to add witness");
assert!(
add_witness.success(),
"add witness failed with {add_witness}"
);
// Publish a single one-chunk epoch from the tuner with observation
// submission pointed at the local chain; capture stderr for the
// "observation submitted:" log line.
let ec_node = ec_node_path();
let broadcast_name = format!("every.channel/e2e/blockchain/{ts}");
let mut publisher = Command::new(&ec_node);
publisher
.env("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", "11".repeat(32))
.arg("moq-publish")
.arg("--publish-manifests")
.arg("--epoch-chunks")
.arg("1")
.arg("--max-chunks")
.arg("1")
.arg("--chunk-ms")
.arg("2000")
.arg("--broadcast-name")
.arg(&broadcast_name)
.arg("--observation-rpc-url")
.arg(&rpc_url)
.arg("--observation-ledger")
.arg(ledger)
.arg("--observation-private-key")
.arg(ANVIL_PK1)
.arg("--chunk-dir")
.arg(tmp.join("chunks"))
.arg("hdhr")
.arg("--host")
.arg(&host)
.arg("--channel")
.arg(&channel)
.stdout(Stdio::null())
.stderr(Stdio::piped());
let mut child = publisher.spawn().expect("failed to spawn publisher");
let stderr = child.stderr.take().expect("publisher stderr missing");
let lines = BufReader::new(stderr)
.lines()
.filter_map(|line| line.ok())
.collect::<Vec<_>>();
let status = child.wait().expect("failed to wait for publisher");
assert!(
status.success(),
"publisher exited with {status}: {lines:?}"
);
// Pull the submitted observation/slot hashes out of the log line.
let observation_line = lines
.iter()
.find(|line| line.starts_with("observation submitted:"))
.expect("publisher did not submit an observation");
let observation_hash = parse_field(observation_line, "observation_hash").unwrap();
let slot_hash = parse_field(observation_line, "slot_hash").unwrap();
// With quorum 1 the submission should finalize immediately: the ledger's
// finalized slot entry must equal the submitted observation hash.
let finalized = Command::new("cast")
.arg("call")
.arg(ledger)
.arg("finalizedObservationBySlot(bytes32)(bytes32)")
.arg(slot_hash)
.arg("--rpc-url")
.arg(&rpc_url)
.output()
.expect("failed to read finalized slot");
assert!(finalized.status.success());
let finalized = String::from_utf8(finalized.stdout).unwrap();
assert_eq!(finalized.trim(), observation_hash);
}

View file

@ -7,6 +7,8 @@ This repository owns deployment of `git.every.channel` (Hetzner 300TB host).
- SSH access to `root@git.every.channel`. - SSH access to `root@git.every.channel`.
- Local key that matches host `authorized_keys` (default: `~/.ssh/id_ed25519`). - Local key that matches host `authorized_keys` (default: `~/.ssh/id_ed25519`).
- `nix` with flakes enabled. - `nix` with flakes enabled.
- For emergency Hetzner recovery, Robot Webservice credentials in 1Password item `Hetzner Robot`
or `EVERY_CHANNEL_ROBOT_USER` / `EVERY_CHANNEL_ROBOT_PASSWORD`.
## Deploy ## Deploy
@ -38,6 +40,44 @@ nix run nixpkgs#nixos-rebuild -- \
- `EVERY_CHANNEL_FORGE_BUILD_HOST` (default same as target) - `EVERY_CHANNEL_FORGE_BUILD_HOST` (default same as target)
- `EVERY_CHANNEL_FORGE_SSH_IDENTITY` (default `~/.ssh/id_ed25519`) - `EVERY_CHANNEL_FORGE_SSH_IDENTITY` (default `~/.ssh/id_ed25519`)
## Emergency Robot recovery
Use this only when both Forge HTTPS and SSH are unreachable. The dedicated host is server
`2800441` at `95.216.114.54`.
```sh
./scripts/hetzner-robot-forge.sh probe
```
If the probe confirms outage, sign in to 1Password CLI so the wrapper can read the existing Robot
Webservice item at runtime:
```sh
op signin
./scripts/hetzner-robot-forge.sh status
```
To boot the host into Hetzner Rescue and issue a hardware reset:
```sh
./scripts/hetzner-robot-forge.sh recover
./scripts/hetzner-robot-forge.sh wait-ssh
```
The wrapper masks Robot-generated rescue passwords by default and tries to attach the local SSH key
fingerprint when activating rescue. Set `EVERY_CHANNEL_ROBOT_AUTHORIZED_KEY_FINGERPRINT` if Robot
uses a different uploaded key fingerprint. Set `EVERY_CHANNEL_ROBOT_PRINT_SENSITIVE=1` only when
password-based rescue login is required.
If production boots but public SSH and HTTPS still time out, inspect the previous boot from Rescue.
The known recovery check is host-wide VPN state: `mullvad-daemon.service` must not be active on
`ecp-forge`, because its firewall policy can block public Forge ingress even when Robot and the
NixOS firewall allow the ports. If a not-yet-redeployed generation still starts Mullvad and the
mutable cached target state is rewritten to `secured`, back up `/boot/grub/grub.cfg`, append
`systemd.mask=mullvad-daemon.service systemd.mask=mullvad-early-boot-blocking.service` to the
default Linux line, and reboot production. After public SSH returns, deploy this repo's NixOS config
so the bootloader is regenerated without the emergency mask.
## Verify
```sh ```sh

View file

@ -98,7 +98,7 @@ ssh -o BatchMode=yes -o IdentityAgent=none -o IdentitiesOnly=yes -i ~/.ssh/id_ed
Once the rollup RPC is live, deploy the observation rail to the L2 RPC: Once the rollup RPC is live, deploy the observation rail to the L2 RPC:
```sh ```sh
EVERY_CHANNEL_RPC_URL=http://127.0.0.1:28545 \
EVERY_CHANNEL_PRIVATE_KEY_FILE=/path/to/private-key \ EVERY_CHANNEL_PRIVATE_KEY_FILE=/path/to/private-key \
./scripts/op-stack/deploy-observation-ledger.sh ./scripts/op-stack/deploy-observation-ledger.sh
``` ```
@ -106,5 +106,10 @@ EVERY_CHANNEL_PRIVATE_KEY_FILE=/path/to/private-key \
## Notes ## Notes
- `op-geth` and `op-node` RPC surfaces bind to `127.0.0.1` on `ecp-forge`. - `op-geth` and `op-node` RPC surfaces bind to `127.0.0.1` on `ecp-forge`.
- The OP Stack L2 execution RPC defaults to `127.0.0.1:28545`, not `8545`; `8545` is reserved for
the full Ethereum mainnet node on the same host.
- `op-geth` P2P uses `28549`, not the Ethereum default `30303`; `30303` is reserved for the host
full Ethereum node.
- The public firewall opening is only for the `op-node` P2P port. - The public firewall opening is only for the `op-node` P2P port.
- The bootstrap uses `op-deployer/v0.6.0-rc.3` by default and OP Labs runtime images aligned to the
generated rollup schema.

View file

@ -79,6 +79,18 @@ On Linux / forge hosts, the equivalent worker path lives in `ec-node`:
- publish with - publish with
`ec-node nbc-wt-publish --url https://cdn.moq.dev/anon --name forge-nbc-sports-philly --source-url 'https://www.nbc.com/live?brand=nbc-sports-philadelphia'` `ec-node nbc-wt-publish --url https://cdn.moq.dev/anon --name forge-nbc-sports-philly --source-url 'https://www.nbc.com/live?brand=nbc-sports-philadelphia'`
- for unattended hosts, persist the Chrome profile with `EVERY_CHANNEL_NBC_PROFILE_DIR=/path/to/profile` - for unattended hosts, persist the Chrome profile with `EVERY_CHANNEL_NBC_PROFILE_DIR=/path/to/profile`
- to automate a Verizon popup on Linux / forge, pass MVPD credentials via env or file paths:
`EVERY_CHANNEL_NBC_MVPD_USERNAME`, `EVERY_CHANNEL_NBC_MVPD_PASSWORD`,
`EVERY_CHANNEL_NBC_MVPD_USERNAME_FILE`, `EVERY_CHANNEL_NBC_MVPD_PASSWORD_FILE`
- the NixOS module can point the Linux worker at root-managed credential files with
`services.every-channel.ec-node.nbc.mvpdUsernameFile` and
`services.every-channel.ec-node.nbc.mvpdPasswordFile`
- for forge-style isolation, the NixOS module can keep only the NBC publisher inside a rootless
user+network namespace backed by `slirp4netns` with
`services.every-channel.ec-node.nbc.isolateWithUserNetns = true`
- pair that with `services.every-channel.ec-node.nbc.requireMullvad = true` to block worker startup
until the host Mullvad daemon is connected; optionally pin a region/country family with
`services.every-channel.ec-node.nbc.mullvadLocation = "USA"`
- the NixOS module exposes `services.every-channel.ec-node.nbc.*` for a persistent Xvfb display plus - the NixOS module exposes `services.every-channel.ec-node.nbc.*` for a persistent Xvfb display plus
an optional local-only VNC bridge so MVPD auth can be completed only when the session is cold an optional local-only VNC bridge so MVPD auth can be completed only when the session is cold
- on Linux virtual displays, the worker disables Chrome GPU acceleration by default; only set - on Linux virtual displays, the worker disables Chrome GPU acceleration by default; only set
@ -115,6 +127,47 @@ Requires Nix (so `ac-ffmpeg` finds FFmpeg headers):
./scripts/e2e-hdhr.sh --host <HDHR_HOST> --channel <CHANNEL> ./scripts/e2e-hdhr.sh --host <HDHR_HOST> --channel <CHANNEL>
``` ```
## HDHomeRun + Observation Chain E2E Test
This runs a local Anvil chain, deploys the observation registry/ledger, publishes one HDHomeRun
manifest epoch, and verifies that the manifest-derived observation finalizes on-chain.
Requires Nix, Foundry, and a reachable local HDHomeRun:
```sh
./scripts/e2e-hdhr-blockchain.sh --host <HDHR_HOST> --channel <CHANNEL>
```
## Local HDHomeRun Publisher Against Remote Observation Chain
The remote OP Stack RPC on `ecp-forge` is intentionally local-only. From the local publisher box,
tunnel it first:
```sh
ssh -N -L 9545:127.0.0.1:28545 root@git.every.channel
```
Then run a local HDHomeRun publisher with observation submission enabled:
```sh
cargo run -p ec-node -- moq-publish \
--publish-manifests \
--epoch-chunks 1 \
--broadcast-name local-hdhr-8-1 \
--observation-rpc-url http://127.0.0.1:9545 \
--observation-ledger <OBSERVATION_LEDGER_ADDRESS> \
--observation-private-key-file /path/to/witness.key \
hdhr --host <HDHR_HOST> --channel <CHANNEL>
```
Environment fallbacks are also supported:
- `EVERY_CHANNEL_OBSERVATION_RPC_URL`
- `EVERY_CHANNEL_OBSERVATION_LEDGER`
- `EVERY_CHANNEL_OBSERVATION_PRIVATE_KEY`
- `EVERY_CHANNEL_OBSERVATION_PRIVATE_KEY_FILE`
- `EVERY_CHANNEL_OBSERVATION_PARENT_HASH`
## Mesh E2E Test (Split Sources) ## Mesh E2E Test (Split Sources)
This runs two publishers over the same broadcast: This runs two publishers over the same broadcast:

View file

@ -0,0 +1,56 @@
# ECP-0109: Local HDHomeRun publishers submit observation rail commitments
Status: Draft
## Problem / context
`ecp-forge` has the Ethereum / OP Stack direction and observation ledger contracts, while local
nodes have the HDHomeRun tuners and can already produce verified manifests. The missing bridge is a
publisher path that can run on the local LAN, observe real tuner-derived epochs, and submit compact
observation headers to the remote chain without moving media bytes on chain.
## Decision
Add an optional observation-rail sink to `ec-node moq-publish`:
- each published manifest epoch can become one `EveryChannelObservationLedger.ObservationHeader`,
- `streamHash` is `keccak256(stream_id)`,
- `epochHash` is `keccak256(epoch_id)`,
- `dataRoot` is the manifest's Ethereum data-root commitment,
- `locatorHash` commits to a compact JSON locator for the manifest and MoQ broadcast,
- `observedUnixMs` and `sequence` come from the manifest body, and
- submission uses a configured RPC URL, ledger address, and witness private key.
The sink is disabled unless explicitly configured. It is intended for a local publisher talking to
the remote every.channel chain through the remote host's local-only RPC surface, typically via an
SSH tunnel. The OP Stack L2 RPC listens on a local port distinct from the RPC ports of the full
Ethereum nodes on the same host, so publisher submissions cannot accidentally target the mainnet or
Sepolia L1 RPC.
## Consequences
- Local HDHomeRun boxes can act as reality witnesses without running the full chain locally.
- The chain stores compact observation commitments only; media segments and full manifests remain
on MoQ / iroh / archive storage.
- The first implementation uses Foundry `cast` for transaction submission so the repo can validate
end-to-end with Anvil before committing to an embedded Rust transaction signer.
- A quorum greater than one still requires additional witnesses to attest; the local publisher only
proposes and self-attests when the configured key is a registry witness.
## Alternatives considered
- Run the full chain locally next to the HDHomeRuns. Rejected because the desired validation target
is the remote every.channel chain, and a local chain would hide remote reachability/configuration
failures.
- Push full media or manifests on chain. Rejected because the observation rail only needs compact
commitments and locators.
- Add an embedded Rust transaction signer immediately. Deferred until the end-to-end rail proves
useful with Foundry tooling.
## Rollout / teardown
1. Add manifest-to-observation derivation in `ec-eth`.
2. Add optional `ec-node moq-publish` flags and environment fallbacks for observation submission.
3. Add an ignored HDHomeRun + Anvil E2E test and a wrapper script.
4. Point local publishers at the remote RPC once the remote chain is reachable.
Teardown is simply disabling the observation options; local manifest publication remains unchanged.

View file

@ -0,0 +1,50 @@
# ECP-0110: `ecp-forge` Hetzner Robot recovery wrapper
Status: Draft
## Problem / context
`git.every.channel` is a single dedicated Hetzner host. When SSH and HTTPS are both unreachable,
the blockchain and Forgejo validation path stalls before repo-owned deployment tools can connect.
Robot can recover the host, but browser-only recovery is hard to repeat and easy to lose across
agent handoffs.
## Decision
Add a repo-local Robot wrapper for `ecp-forge` recovery:
- default to server `2800441` / `95.216.114.54`,
- read Robot Webservice credentials from environment variables or the existing 1Password item at
runtime,
- avoid storing Robot passwords in git or shell profiles,
- expose explicit status, rescue, reset, recover, and reachability-probe commands, and
- mask Robot-generated rescue passwords unless the operator explicitly opts into printing them.
The wrapper treats rescue activation and reset as operational recovery steps, not deployment. Once
the host is reachable again, `scripts/deploy-ecp-forge.sh` remains the source of truth for the
NixOS system state.
## Consequences
- Future agents can recover the Forge after a local 1Password CLI sign-in without asking for pasted
Robot secrets.
- The host identity and Robot server number are documented in the repo instead of being rediscovered
from the browser UI.
- Recovery actions remain explicit commands; ordinary probes never mutate Robot state.
## Alternatives considered
- Continue browser-only Robot recovery. Rejected because it is too stateful for repeated agent
handoffs and does not leave a repo-owned runbook.
- Store Robot credentials in a repo-local file. Rejected because Robot credentials are operational
secrets and should stay in 1Password or the caller's environment.
- Move recovery into the deploy script. Rejected because Robot rescue/reset is a host-recovery action,
while `deploy-ecp-forge.sh` should remain the NixOS deployment entrypoint.
## Rollout / teardown
1. Add `scripts/hetzner-robot-forge.sh`.
2. Document the emergency path in `docs/DEPLOY_ECP_FORGE.md`.
3. Use `probe` first, then `status`, then `recover` only when the Forge is unreachable.
Teardown is removing the wrapper and returning to browser-only Robot operations.

View file

@ -0,0 +1,46 @@
# ECP-0111: Disable Host Mullvad for Forge Public Recovery
Status: Draft
## Problem / context
`git.every.channel` must stay reachable on public SSH and HTTPS so blockchain validation, deploys,
and Forgejo review can proceed. The current `ecp-forge` boot reaches Forgejo, Caddy, and SSH socket
activation, but the host becomes unreachable once the host-wide Mullvad daemon connects and applies
its firewall policy.
## Decision
Disable host-wide Mullvad on `ecp-forge` and stop making forge NBC workers wait for host Mullvad.
The public Forge host stays on the Hetzner interface. NBC egress that needs Mullvad should return
through a process-scoped or namespace-scoped design that does not install a host-wide kill switch.
## Consequences
- `git.every.channel` can serve SSH, HTTPS, and ACME challenges on the public Hetzner address.
- Forge recovery no longer depends on manual Mullvad split-tunnel state.
- Forge NBC Philadelphia publishing loses the host-wide Mullvad egress assumption until a narrower
worker-only egress path lands.
## Alternatives considered
- Keep host-wide Mullvad and rely on split-tunnel exceptions. Rejected because production logs show
public SSH and HTTPS time out while Mullvad's firewall policy is active.
- Keep Mullvad enabled but mask only Caddy or SSH from the tunnel. Rejected because the daemon's
firewall policy still governs inbound public reachability at the host level.
- Disable the whole `ec-node` service. Rejected because archive and blockchain workers should remain
independent of the NBC egress incident.
## Rollout / teardown
1. From Rescue, inspect the previous boot and confirm Forgejo/Caddy start before Mullvad applies its
firewall policy.
2. If Mullvad rewrites its cached target state back to `secured`, temporarily append
`systemd.mask=mullvad-daemon.service systemd.mask=mullvad-early-boot-blocking.service` to the
default GRUB entry and reboot production.
3. Deploy the NixOS config that keeps host-wide Mullvad disabled, which regenerates the bootloader
without the emergency mask.
4. Verify `ssh`, `https://git.every.channel/`, Forgejo, and Caddy.
Teardown is re-enabling host Mullvad only after a tested design preserves public inbound Forge
traffic.

View file

@ -0,0 +1,39 @@
# ECP-0112: Match Nested OP Deployer Intent Schema
Status: Draft
## Problem / context
`ecp-forge` OP Stack bootstrap failed with `missing key id` even though
`/var/lib/every-channel/op-stack/deployer/.deployer/intent.toml` contained an `id` field. After that
was repaired, bootstrap also found a placeholder `state.json` whose deployment fields were still
null. The current `op-deployer` intent format writes chain and role values under nested TOML
sections, while the bootstrap helper only matched keys at the start of a line and treated any
`state.json` as completed state.
## Decision
Update the OP Stack bootstrap helper to replace TOML keys after optional indentation, preserve that
indentation when writing the replacement value, and run `op-deployer apply` unless the state file has
non-null applied deployment fields.
## Consequences
- The existing `op-deployer/v0.6.0-rc.3` intent file can be repaired in place.
- The bootstrap service can generate sequencer, batcher, proposer, challenger, and dispute monitor
runtime config from the existing deployment state.
- Placeholder `state.json` files no longer block the apply step.
- The change stays compatible with flat TOML keys if `op-deployer` changes the layout again.
## Alternatives considered
- Regenerate the deployment state from scratch. Rejected because a surgical config repair is safer
for an already deployed OP Stack root.
- Keep matching only top-level keys. Rejected because it does not match the live `op-deployer`
schema on `ecp-forge`.
## Rollout / teardown
Deploy the updated bootstrap helper, restart `every-channel-op-stack-bootstrap.service`, and then
restart the dependent OP Stack containers. Teardown is reverting this helper change and regenerating
the OP Stack root with a known-flat intent schema.

View file

@ -0,0 +1,53 @@
# ECP-0113: Keep OP Stack Runtime Compatible With Forge Host Services
Status: Draft
## Problem / context
`ecp-forge` now runs the OP Stack bootstrap far enough to produce `deployment.json`, `genesis.json`,
and `rollup.json`, but the runtime containers still failed to stay up. `op-geth` tried to bind the
default Ethereum P2P port `30303`, already owned by the host full Ethereum node. The pinned
`op-node:v1.13.5` rejected current `op-deployer/v0.6.0-rc.3` rollup fields such as `minBaseFee`.
After aligning to `op-node:v1.14.0`, that image still rejected the newer
`genesis.system_config.daFootprintGasScalar` field. The generated rollup config also carried
`eip1559Params = 0x0000000000000000` even though the genesis `extraData` and chain config encode
denominator `250` and elasticity `6`; that zero value caused `op-geth` to panic when the sequencer
requested the first payload. `op-batcher:v1.14.0` also no longer accepts `--batch-inbox-address`.
Isolated compatibility probes showed `op-node:v1.16.6` paired with `op-geth:v1.101702.0-rc.1` can
run against the generated genesis hash and produce L2 blocks.
## Decision
Assign `op-geth` a repo-owned L2 P2P port in the existing `285xx` range, align `op-node` to the
probed `v1.16.6` runtime, move `op-geth` to the probed
`v1.101702.0-rc.1` image, remove the stale batcher inbox-address flag, delete only
`genesis.system_config.daFootprintGasScalar` from generated rollup configs, and derive zero
`eip1559Params` from the generated `chain_op_config`.
## Consequences
- The host Ethereum node can keep `30303` without blocking OP Stack startup.
- The OP Stack RPC and P2P port assignments stay documented in repo config.
- Runtime image compatibility is explicit in Nix config.
- The rollup JSON normalization is intentionally narrow: it removes the exact field rejected by the
older `op-node:v1.14.0` parser and repairs only the zero EIP-1559 params that caused the live
`op-geth` payload panic.
- The `op-geth` image is an explicit release-candidate tag because the previously pinned image
panicked against the current deployer output.
## Alternatives considered
- Stop the host full Ethereum node. Rejected because the OP Stack should coexist with the existing
Ethereum services.
- Strip all newer-looking fields from `rollup.json`. Rejected because `op-node:v1.14.0` accepts the
other generated fields tested during recovery; broad deletion would hide schema drift.
- Leave zero `eip1559Params` in place. Rejected because the live sequencer/geth pair panicked before
the first L2 block could be built.
- Keep `op-geth:v1.101511.1`. Rejected because it reproducibly panics on first payload construction
for this generated chain config.
## Rollout / teardown
Deploy the updated NixOS module and bootstrap helper, reset failed OP Stack units, and verify L2 RPC
and rollup RPC locally on `ecp-forge`. Teardown is reverting the port assignment and rollup JSON
normalization, then regenerating runtime files with a mutually compatible deployer/runtime image set.

View file

@ -309,6 +309,60 @@ in
description = "Pass `EVERY_CHANNEL_NBC_NO_SANDBOX=1` for Chrome worker sessions."; description = "Pass `EVERY_CHANNEL_NBC_NO_SANDBOX=1` for Chrome worker sessions.";
}; };
# --- NBC MVPD (TV provider) login material --------------------------------
# File-based variants keep the secrets in root-managed paths for unattended
# login; nothing here is stored in git.
mvpdProvider = lib.mkOption {
type = lib.types.str;
default = "Verizon Fios";
description = "MVPD provider name used when the NBC worker must choose a TV provider.";
};
mvpdUsernameFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = "Optional root-managed file containing the MVPD username for unattended NBC login.";
};
mvpdPasswordFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = "Optional root-managed file containing the MVPD password for unattended NBC login.";
};
# --- NBC egress isolation and VPN gating ----------------------------------
isolateWithUserNetns = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Launch NBC browser-backed workers inside a rootless user+network namespace backed by
slirp4netns. This keeps the Chrome / ec-node process tree in its own network context
while still using the host's active upstream route.
'';
};
requireMullvad = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Refuse to start NBC browser-backed workers until `mullvad status` reports a connected
tunnel. This assumes the host Mullvad daemon is already logged in and connected.
'';
};
mullvadWaitSeconds = lib.mkOption {
type = lib.types.ints.positive;
default = 90;
description = "Maximum time to wait for Mullvad connectivity before failing an NBC worker start.";
};
mullvadLocation = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "USA";
description = ''
Optional case-insensitive substring that must appear in `mullvad status` before an NBC
worker starts. Use this to pin workers to a country or city family without committing the
operational login material itself.
'';
};
vnc = { vnc = {
enable = lib.mkOption { enable = lib.mkOption {
type = lib.types.bool; type = lib.types.bool;
@ -435,6 +489,11 @@ in
pkgs.iproute2 pkgs.iproute2
cfg.package cfg.package
] ]
++ lib.optionals (isNbc && cfg.nbc.requireMullvad) [ pkgs.mullvad-vpn ]
++ lib.optionals (isNbc && cfg.nbc.isolateWithUserNetns) [
pkgs.slirp4netns
pkgs.util-linux
]
++ lib.optionals cfg.hdhomerun.autoDiscover [ pkgs.jq cfg.discoveryPackage ]; ++ lib.optionals cfg.hdhomerun.autoDiscover [ pkgs.jq cfg.discoveryPackage ];
text = text =
let let
@ -452,10 +511,75 @@ in
controlDiscoveryStr = if cfg.control.discovery == null then "" else cfg.control.discovery; controlDiscoveryStr = if cfg.control.discovery == null then "" else cfg.control.discovery;
controlIrohSecretStr = if cfg.control.irohSecret == null then "" else cfg.control.irohSecret; controlIrohSecretStr = if cfg.control.irohSecret == null then "" else cfg.control.irohSecret;
controlGossipPeerLines = lib.concatMapStrings (peer: "cmd+=(--gossip-peer ${lib.escapeShellArg peer})\n") cfg.control.gossipPeers; controlGossipPeerLines = lib.concatMapStrings (peer: "cmd+=(--gossip-peer ${lib.escapeShellArg peer})\n") cfg.control.gossipPeers;
nbcMullvadLocationStr = if cfg.nbc.mullvadLocation == null then "" else cfg.nbc.mullvadLocation;
in in
'' ''
set -euo pipefail set -euo pipefail
# Gate NBC worker startup on host Mullvad state. Polls `mullvad status` once
# per second until it reports Connected (optionally also matching the
# configured location substring, case-insensitively) or the timeout elapses.
# Returns 0 when connected, 1 on timeout.
wait_for_mullvad() {
local wait_seconds status expected
# Both values are baked in at Nix eval time; expected is the empty string
# when no mullvadLocation was configured.
wait_seconds=${toString cfg.nbc.mullvadWaitSeconds}
expected=${lib.escapeShellArg nbcMullvadLocationStr}
for _ in $(seq 1 "$wait_seconds"); do
# mullvad may be missing or not responsive yet; treat that as not connected.
status="$(mullvad status 2>/dev/null || true)"
if [[ "$status" == Connected* ]]; then
# An empty expectation accepts any connected relay.
if [[ -z "$expected" ]] || printf '%s\n' "$status" | grep -Fqi -- "$expected"; then
return 0
fi
fi
sleep 1
done
echo "ec-node: Mullvad was not connected${lib.optionalString (cfg.nbc.mullvadLocation != null) " to the expected location"} within ${toString cfg.nbc.mullvadWaitSeconds}s" >&2
mullvad status >&2 || true
return 1
}
# Run the prepared "cmd" array inside a rootless user+network namespace with
# slirp4netns providing user-mode egress. Handshake:
#   1. unshare starts bash in a fresh userns/netns; it brings lo up, writes
#      its PID to pid_file, then blocks reading ready_fifo.
#   2. The parent waits for pid_file, attaches slirp4netns (tap0) to that
#      PID, then writes a go token into ready_fifo.
#   3. The child execs the worker command; its exit status is returned.
run_in_user_netns() {
local tmpdir pid_file ready_fifo ns_pid slirp_pid status
tmpdir="$(mktemp -d /tmp/${unit}.usernet.XXXXXX)"
pid_file="$tmpdir/pid"
ready_fifo="$tmpdir/ready"
mkfifo "$ready_fifo"
# SC2016: the single-quoted script below is expanded by the inner bash, not here.
# shellcheck disable=SC2016
unshare --user --map-root-user --net ${pkgs.bash}/bin/bash -lc '
set -euo pipefail
ip link set lo up
echo $$ > "$1"
read -r _ < "$2"
shift 2
exec "$@"
' bash "$pid_file" "$ready_fifo" "''${cmd[@]}" &
ns_pid=$!
# Poll up to ~5s (50 x 0.1s) for the namespaced PID file to be written.
for _ in $(seq 1 50); do
[[ -s "$pid_file" ]] && break
sleep 0.1
done
if [[ ! -s "$pid_file" ]]; then
echo "ec-node: timed out waiting for NBC user-netns PID" >&2
kill "$ns_pid" 2>/dev/null || true
rm -rf "$tmpdir"
return 1
fi
slirp4netns --configure --mtu=1500 "$(cat "$pid_file")" tap0 >"$tmpdir/slirp.log" 2>&1 &
slirp_pid=$!
# Best-effort settle time for tap0 configuration; slirp4netns gives no
# explicit readiness signal here.
sleep 1
printf 'go\n' > "$ready_fifo"
# Capture the worker exit status without tripping errexit.
set +e
wait "$ns_pid"
status=$?
set -e
kill "$slirp_pid" 2>/dev/null || true
wait "$slirp_pid" 2>/dev/null || true
rm -rf "$tmpdir"
return "$status"
}
nbc_url=${lib.escapeShellArg nbcUrlStr} nbc_url=${lib.escapeShellArg nbcUrlStr}
input="" input=""
if [[ -z "$nbc_url" ]]; then if [[ -z "$nbc_url" ]]; then
@ -596,7 +720,16 @@ in
# quickly during activation. # quickly during activation.
trap 'exit 0' INT TERM trap 'exit 0' INT TERM
while true; do while true; do
${lib.optionalString (isNbc && cfg.nbc.requireMullvad) ''
if ! wait_for_mullvad; then
sleep 2
continue
fi
''}
${lib.optionalString (isNbc && cfg.nbc.isolateWithUserNetns) "run_in_user_netns || true"}
${lib.optionalString (!isNbc || !cfg.nbc.isolateWithUserNetns) ''
"''${cmd[@]}" || true "''${cmd[@]}" || true
''}
sleep 2 sleep 2
done done
''; '';
@ -609,10 +742,12 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = after =
[ "network-online.target" ] [ "network-online.target" ]
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ]; ++ lib.optionals isNbc [ "every-channel-nbc-display.service" ]
++ lib.optionals (isNbc && cfg.nbc.requireMullvad) [ "mullvad-daemon.service" ];
wants = wants =
[ "network-online.target" ] [ "network-online.target" ]
++ lib.optionals isNbc [ "every-channel-nbc-display.service" ]; ++ lib.optionals isNbc [ "every-channel-nbc-display.service" ]
++ lib.optionals (isNbc && cfg.nbc.requireMullvad) [ "mullvad-daemon.service" ];
# Keep the unit from entering "failed" due to rapid restarts (deploy-flake treats # Keep the unit from entering "failed" due to rapid restarts (deploy-flake treats
# failed units during `switch-to-configuration test` as a deployment failure). # failed units during `switch-to-configuration test` as a deployment failure).
@ -652,13 +787,22 @@ in
environment = environment =
cfg.environment cfg.environment
// lib.optionalAttrs isNbc { // lib.optionalAttrs isNbc (
{
DISPLAY = cfg.nbc.display; DISPLAY = cfg.nbc.display;
EVERY_CHANNEL_NBC_CHROME_PATH = cfg.nbc.chromeBinary; EVERY_CHANNEL_NBC_CHROME_PATH = cfg.nbc.chromeBinary;
EVERY_CHANNEL_NBC_MVPD_PROVIDER = cfg.nbc.mvpdProvider;
EVERY_CHANNEL_NBC_PROFILE_DIR = cfg.nbc.profileDir; EVERY_CHANNEL_NBC_PROFILE_DIR = cfg.nbc.profileDir;
EVERY_CHANNEL_NBC_NO_SANDBOX = if cfg.nbc.noSandbox then "1" else "0"; EVERY_CHANNEL_NBC_NO_SANDBOX = if cfg.nbc.noSandbox then "1" else "0";
HOME = "/var/lib/every-channel"; HOME = "/var/lib/every-channel";
}; }
// lib.optionalAttrs (cfg.nbc.mvpdUsernameFile != null) {
EVERY_CHANNEL_NBC_MVPD_USERNAME_FILE = toString cfg.nbc.mvpdUsernameFile;
}
// lib.optionalAttrs (cfg.nbc.mvpdPasswordFile != null) {
EVERY_CHANNEL_NBC_MVPD_PASSWORD_FILE = toString cfg.nbc.mvpdPasswordFile;
}
);
}; };
}) })
cfg.broadcasts) cfg.broadcasts)

View file

@ -54,6 +54,50 @@ in
description = "P2P listen port for op-node."; description = "P2P listen port for op-node.";
}; };
# Repo-owned local OP Stack port assignments. Defaults sit in the 285xx
# range so the containers cannot collide with host Ethereum node services
# (for example the default Ethereum P2P port 30303).
ports = {
l2Http = lib.mkOption {
type = lib.types.port;
default = 28545;
description = "Local op-geth HTTP JSON-RPC port.";
};
l2Ws = lib.mkOption {
type = lib.types.port;
default = 28546;
description = "Local op-geth WebSocket JSON-RPC port.";
};
l2Auth = lib.mkOption {
type = lib.types.port;
default = 28551;
description = "Local op-geth Engine API port.";
};
l2P2p = lib.mkOption {
type = lib.types.port;
default = 28549;
description = "Local op-geth P2P port, kept away from the host Ethereum node's 30303.";
};
rollupRpc = lib.mkOption {
type = lib.types.port;
default = 28547;
description = "Local op-node rollup RPC port.";
};
batcherRpc = lib.mkOption {
type = lib.types.port;
default = 28548;
description = "Local op-batcher admin RPC port.";
};
proposerRpc = lib.mkOption {
type = lib.types.port;
default = 28560;
description = "Local op-proposer admin RPC port.";
};
};
openFirewall = lib.mkOption { openFirewall = lib.mkOption {
type = lib.types.bool; type = lib.types.bool;
default = true; default = true;
@ -87,12 +131,12 @@ in
images = { images = {
opNode = lib.mkOption { opNode = lib.mkOption {
type = lib.types.str; type = lib.types.str;
default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.13.5"; default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.16.6";
description = "Container image for op-node."; description = "Container image for op-node.";
}; };
opGeth = lib.mkOption { opGeth = lib.mkOption {
type = lib.types.str; type = lib.types.str;
default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101511.1"; default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101702.0-rc.1";
description = "Container image for op-geth."; description = "Container image for op-geth.";
}; };
batcher = lib.mkOption { batcher = lib.mkOption {
@ -177,7 +221,7 @@ in
if [ ! -d /workspace/op-geth-data/geth/chaindata ]; then if [ ! -d /workspace/op-geth-data/geth/chaindata ]; then
geth init --datadir=/workspace/op-geth-data --state.scheme=hash /workspace/genesis.json geth init --datadir=/workspace/op-geth-data --state.scheme=hash /workspace/genesis.json
fi fi
exec geth --datadir=/workspace/op-geth-data --http --http.addr=127.0.0.1 --http.port=8545 --ws --ws.addr=127.0.0.1 --ws.port=8546 --authrpc.addr=127.0.0.1 --authrpc.port=8551 --authrpc.jwtsecret=/workspace/jwt.txt --syncmode=full --gcmode=archive --rollup.disabletxpoolgossip=true --http.vhosts=* --http.corsdomain=* --http.api=eth,net,web3,debug,txpool,admin --ws.origins=* --ws.api=eth,net,web3,debug,txpool,admin --authrpc.vhosts=* exec geth --datadir=/workspace/op-geth-data --http --http.addr=127.0.0.1 --http.port=${toString cfg.ports.l2Http} --ws --ws.addr=127.0.0.1 --ws.port=${toString cfg.ports.l2Ws} --authrpc.addr=127.0.0.1 --authrpc.port=${toString cfg.ports.l2Auth} --authrpc.jwtsecret=/workspace/jwt.txt --port=${toString cfg.ports.l2P2p} --syncmode=full --gcmode=archive --rollup.disabletxpoolgossip=true --http.vhosts=* --http.corsdomain=* --http.api=eth,net,web3,debug,txpool,admin --ws.origins=* --ws.api=eth,net,web3,debug,txpool,admin --authrpc.vhosts=*
'' ''
]; ];
}; };
@ -198,7 +242,7 @@ in
exec op-node \ exec op-node \
--l1="$L1_RPC_URL" \ --l1="$L1_RPC_URL" \
--l1.beacon="$L1_BEACON_URL" \ --l1.beacon="$L1_BEACON_URL" \
--l2=http://127.0.0.1:8551 \ --l2=http://127.0.0.1:${toString cfg.ports.l2Auth} \
--l2.jwt-secret=/workspace/jwt.txt \ --l2.jwt-secret=/workspace/jwt.txt \
--rollup.config=/workspace/rollup.json \ --rollup.config=/workspace/rollup.json \
--sequencer.enabled=true \ --sequencer.enabled=true \
@ -213,7 +257,7 @@ in
--p2p.advertise.udp=${toString cfg.p2pListenPort} \ --p2p.advertise.udp=${toString cfg.p2pListenPort} \
--p2p.sequencer.key="$PRIVATE_KEY" \ --p2p.sequencer.key="$PRIVATE_KEY" \
--rpc.addr=127.0.0.1 \ --rpc.addr=127.0.0.1 \
--rpc.port=8547 \ --rpc.port=${toString cfg.ports.rollupRpc} \
--rpc.enable-admin \ --rpc.enable-admin \
--log.level=info \ --log.level=info \
--log.format=json --log.format=json
@ -236,9 +280,8 @@ in
--l2-eth-rpc="$L2_RPC_URL" \ --l2-eth-rpc="$L2_RPC_URL" \
--rollup-rpc="$ROLLUP_RPC_URL" \ --rollup-rpc="$ROLLUP_RPC_URL" \
--private-key="$PRIVATE_KEY" \ --private-key="$PRIVATE_KEY" \
--batch-inbox-address="$BATCH_INBOX_ADDRESS" \
--rpc.addr=127.0.0.1 \ --rpc.addr=127.0.0.1 \
--rpc.port=8548 \ --rpc.port=${toString cfg.ports.batcherRpc} \
--rpc.enable-admin \ --rpc.enable-admin \
--max-channel-duration=1 \ --max-channel-duration=1 \
--data-availability-type=calldata \ --data-availability-type=calldata \
@ -260,7 +303,7 @@ in
"-lc" "-lc"
'' ''
exec op-proposer \ exec op-proposer \
--rpc.port=8560 \ --rpc.port=${toString cfg.ports.proposerRpc} \
--rollup-rpc="$ROLLUP_RPC_URL" \ --rollup-rpc="$ROLLUP_RPC_URL" \
--l1-eth-rpc="$L1_RPC_URL" \ --l1-eth-rpc="$L1_RPC_URL" \
--private-key="$PRIVATE_KEY" \ --private-key="$PRIVATE_KEY" \
@ -347,6 +390,8 @@ in
export EVERY_CHANNEL_OP_STACK_L1_BEACON_URL=${lib.escapeShellArg cfg.l1BeaconUrl} export EVERY_CHANNEL_OP_STACK_L1_BEACON_URL=${lib.escapeShellArg cfg.l1BeaconUrl}
export EVERY_CHANNEL_OP_STACK_CHAIN_ID=${toString cfg.chainId} export EVERY_CHANNEL_OP_STACK_CHAIN_ID=${toString cfg.chainId}
export EVERY_CHANNEL_OP_STACK_P2P_ADVERTISE_IP=${lib.escapeShellArg cfg.p2pAdvertiseIp} export EVERY_CHANNEL_OP_STACK_P2P_ADVERTISE_IP=${lib.escapeShellArg cfg.p2pAdvertiseIp}
export EVERY_CHANNEL_OP_STACK_L2_RPC_URL=http://127.0.0.1:${toString cfg.ports.l2Http}
export EVERY_CHANNEL_OP_STACK_ROLLUP_RPC_URL=http://127.0.0.1:${toString cfg.ports.rollupRpc}
export EVERY_CHANNEL_OP_DEPLOYER_BIN=${lib.escapeShellArg "${cfg.rootDir}/bin/op-deployer"} export EVERY_CHANNEL_OP_DEPLOYER_BIN=${lib.escapeShellArg "${cfg.rootDir}/bin/op-deployer"}
export EVERY_CHANNEL_OP_DEPLOYER_TAG=${lib.escapeShellArg cfg.opDeployerTag} export EVERY_CHANNEL_OP_DEPLOYER_TAG=${lib.escapeShellArg cfg.opDeployerTag}
export EVERY_CHANNEL_OP_DEPLOYER_DOWNLOAD_SCRIPT=${lib.escapeShellArg downloadScript} export EVERY_CHANNEL_OP_DEPLOYER_DOWNLOAD_SCRIPT=${lib.escapeShellArg downloadScript}

View file

@ -263,6 +263,9 @@ in
chromeBinary = "${pkgs.google-chrome}/bin/google-chrome-stable"; chromeBinary = "${pkgs.google-chrome}/bin/google-chrome-stable";
display = ":120"; display = ":120";
screen = "1920x1080x24"; screen = "1920x1080x24";
isolateWithUserNetns = true;
requireMullvad = false;
mullvadLocation = null;
noSandbox = true; noSandbox = true;
vnc = { vnc = {
enable = true; enable = true;
@ -295,13 +298,8 @@ in
}; };
services.mullvad-vpn = { services.mullvad-vpn = {
enable = true; enable = false;
enableExcludeWrapper = true; enableExcludeWrapper = false;
};
systemd.services.every-channel-wt-publish-forge-nbc-sports-philly = {
after = [ "mullvad-daemon.service" ];
wants = [ "mullvad-daemon.service" ];
}; };
services.every-channel.op-stack = { services.every-channel.op-stack = {

55
scripts/e2e-hdhr-blockchain.sh Executable file
View file

@ -0,0 +1,55 @@
#!/usr/bin/env bash
# E2E wrapper: publish one HDHomeRun manifest epoch against a local Anvil
# observation chain and verify on-chain finalization (see notes in usage).
set -euo pipefail

# Always operate from the repository root, regardless of invocation cwd.
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${repo_root}"

usage() {
cat >&2 <<'EOF'
usage:
scripts/e2e-hdhr-blockchain.sh --host <HDHR_HOST> --channel <CHANNEL>
notes:
- starts a local Anvil chain
- deploys the observation registry and ledger with quorum=1
- runs ec-node against the HDHomeRun source
- verifies that the published manifest observation finalizes on-chain
EOF
}

# Environment variables seed the defaults; flags override them.
host="${EVERY_CHANNEL_E2E_HDHR_HOST:-}"
channel="${EVERY_CHANNEL_E2E_HDHR_CHANNEL:-}"

while (( $# > 0 )); do
arg="$1"
case "${arg}" in
--host)
host="${2:-}"
shift 2
;;
--channel)
channel="${2:-}"
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "error: unknown arg: ${arg}" >&2
usage
exit 2
;;
esac
done

# Both coordinates are mandatory; fail fast before entering the dev shell.
if [[ -z "${host}" ]] || [[ -z "${channel}" ]]; then
echo "error: --host and --channel are required" >&2
usage
exit 2
fi

export EVERY_CHANNEL_E2E_HDHR_HOST="${host}"
export EVERY_CHANNEL_E2E_HDHR_CHANNEL="${channel}"

# The test is #[ignore]d by default; run it explicitly inside the Nix shell
# so cargo sees the FFmpeg headers the crate build requires.
nix develop --accept-flake-config -c \
bash -lc 'cargo test -p ec-node --test e2e_hdhr_blockchain -- --ignored --nocapture'

257
scripts/hetzner-robot-forge.sh Executable file
View file

@ -0,0 +1,257 @@
#!/usr/bin/env bash
# Hetzner Robot helper for the Forge host: probe reachability, query Robot
# metadata, activate rescue mode, and trigger resets via the Robot webservice.
set -euo pipefail

# Anchor the working directory at the repository root regardless of caller CWD.
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${root}"

# Robot webservice endpoint and target-server defaults; all overridable via env.
robot_base="${EVERY_CHANNEL_ROBOT_API_BASE:-https://robot-ws.your-server.de}"
server="${EVERY_CHANNEL_ROBOT_SERVER:-2800441}"
host_ip="${EVERY_CHANNEL_FORGE_IP:-95.216.114.54}"
host_name="${EVERY_CHANNEL_FORGE_HOSTNAME:-git.every.channel}"
# 1Password item/vault used as a credential fallback when env vars are unset.
op_item="${EVERY_CHANNEL_ROBOT_OP_ITEM:-Hetzner Robot}"
op_vault="${EVERY_CHANNEL_ROBOT_OP_VAULT:-}"
# SSH identity used for reachability probes; public key feeds the Robot
# rescue-mode authorized_key fingerprint.
ssh_identity="${EVERY_CHANNEL_FORGE_SSH_IDENTITY:-$HOME/.ssh/id_ed25519}"
ssh_pub="${EVERY_CHANNEL_FORGE_SSH_PUBLIC_KEY:-${ssh_identity}.pub}"
# Print the command summary and credential/env documentation to stdout.
# Heredoc is unquoted on purpose so $0, ${host_name}, ${server}, ${host_ip}
# and ${op_item} expand to the effective values.
usage() {
  cat <<EOF
Usage: $0 <command>
Commands:
probe Check public HTTPS and SSH reachability for ${host_name}.
server Query Robot server metadata.
status Query Robot reset and rescue status.
rescue-status Query Robot rescue status.
activate-rescue Activate Linux rescue mode for ${server}.
reset [type] Execute a Robot reset, default: hw.
recover [type] Activate rescue mode, then execute a reset.
wait-ssh Wait until TCP/22 answers on ${host_ip}.
Credentials:
Set EVERY_CHANNEL_ROBOT_USER and EVERY_CHANNEL_ROBOT_PASSWORD, or sign in with
1Password CLI and keep the existing item named "${op_item}" available.
Optional:
EVERY_CHANNEL_ROBOT_OP_ITEM default: Hetzner Robot
EVERY_CHANNEL_ROBOT_OP_VAULT optional 1Password vault scope
EVERY_CHANNEL_ROBOT_AUTHORIZED_KEY_FINGERPRINT
EVERY_CHANNEL_ROBOT_PRINT_SENSITIVE=1 print Robot-generated rescue password
EVERY_CHANNEL_ROBOT_RESCUE_OS=linux
EVERY_CHANNEL_ROBOT_RESCUE_KEYBOARD=us
EVERY_CHANNEL_ROBOT_RESET_TYPE=hw
EOF
}
# Abort with exit status 2 unless the named command is resolvable on PATH.
require_cmd() {
  local needed="$1"
  command -v "${needed}" >/dev/null 2>&1 && return 0
  echo "error: required command not found: ${needed}" >&2
  exit 2
}
# Read one labeled field from the configured 1Password item, revealing
# concealed values. Scopes the lookup to ${op_vault} only when it is set.
op_field() {
  local field="$1"
  if [[ -z "${op_vault}" ]]; then
    op item get "${op_item}" --fields "label=${field}" --reveal
  else
    op item get "${op_item}" --vault "${op_vault}" --fields "label=${field}" --reveal
  fi
}
# Populate the robot_user / robot_password globals. Prefers environment
# variables (EVERY_CHANNEL_* first, then legacy HETZNER_*) and falls back
# to the 1Password item when either credential is still missing.
# Exits 2 when no credential source yields a complete pair.
load_robot_auth() {
  robot_user="${EVERY_CHANNEL_ROBOT_USER:-${HETZNER_ROBOT_USER:-}}"
  robot_password="${EVERY_CHANNEL_ROBOT_PASSWORD:-${HETZNER_ROBOT_PASSWORD:-}}"
  if [[ -n "${robot_user}" && -n "${robot_password}" ]]; then
    return 0
  fi
  # Fall back to 1Password; only fill in whichever half is missing.
  require_cmd op
  robot_user="${robot_user:-$(op_field username)}"
  robot_password="${robot_password:-$(op_field password)}"
  if [[ -n "${robot_user}" && -n "${robot_password}" ]]; then
    return 0
  fi
  echo "error: Robot credentials are not available" >&2
  echo "hint: run 'op signin' or export EVERY_CHANNEL_ROBOT_USER and EVERY_CHANNEL_ROBOT_PASSWORD" >&2
  exit 2
}
# Issue an authenticated request against the Robot webservice.
# Usage: robot_curl METHOD PATH [extra curl args...]
# Credentials go through a mode-0600 --config temp file (never the command
# line) so they cannot leak via the process table; the temp file is removed
# before returning and curl's exit status is propagated.
robot_curl() {
  local method="$1"
  local path="$2"
  shift 2
  local config
  config="$(mktemp "${TMPDIR:-/tmp}/ec-robot-curl.XXXXXX")"
  chmod 600 "${config}"
  {
    printf 'url = "%s%s"\n' "${robot_base}" "${path}"
    printf 'request = "%s"\n' "${method}"
    printf 'user = "%s:%s"\n' "${robot_user}" "${robot_password}"
    printf 'silent\nshow-error\nfail\n'
  } >"${config}"
  # Fix: the previous `trap cleanup_config RETURN` set inside this function
  # is global in bash, so it kept firing on every later function return in
  # the script. Clean up explicitly instead, preserving curl's exit status
  # under `set -e`.
  local status=0
  curl --config "${config}" "$@" || status=$?
  rm -f "${config}"
  return "${status}"
}
# Filter: redact every "password" value in the JSON on stdin unless the
# caller opted in via EVERY_CHANNEL_ROBOT_PRINT_SENSITIVE=1.
# Prefers jq (structural walk); falls back to a sed regex when jq is absent.
mask_sensitive_json() {
  case "${EVERY_CHANNEL_ROBOT_PRINT_SENSITIVE:-0}" in
    1)
      cat
      ;;
    *)
      if command -v jq >/dev/null 2>&1; then
        jq 'walk(if type == "object" and has("password") then .password = "<redacted>" else . end)'
      else
        sed -E 's/"password"[[:space:]]*:[[:space:]]*"[^"]*"/"password":"<redacted>"/g'
      fi
      ;;
  esac
}
# Emit the SSH key fingerprint to register with Robot's rescue boot.
# An explicit EVERY_CHANNEL_ROBOT_AUTHORIZED_KEY_FINGERPRINT wins verbatim;
# otherwise the MD5 fingerprint is derived from ${ssh_pub} when both the
# file and ssh-keygen are available. Prints nothing when neither applies.
authorized_key_fingerprint() {
  local override="${EVERY_CHANNEL_ROBOT_AUTHORIZED_KEY_FINGERPRINT:-}"
  if [[ -n "${override}" ]]; then
    printf '%s\n' "${override}"
    return
  fi
  [[ -f "${ssh_pub}" ]] || return 0
  command -v ssh-keygen >/dev/null 2>&1 || return 0
  # Robot expects the bare hex MD5 form, so strip the "MD5:" prefix.
  ssh-keygen -E md5 -lf "${ssh_pub}" | awk '{print $2}' | sed 's/^MD5://'
}
# Reachability report: HTTPS via DNS, HTTPS via the pinned IP (separates
# stale DNS from a down host), then SSH as root. Every check is best-effort
# ("|| true") so one failure never aborts the rest of the report.
probe() {
  echo "== HTTPS via DNS =="
  curl --max-time 12 -I -sS "https://${host_name}/" || true
  echo
  echo "== HTTPS via pinned IP =="
  # --resolve keeps the Host/SNI as ${host_name} while connecting straight
  # to ${host_ip}, bypassing DNS resolution entirely.
  curl --max-time 12 -I -sS --resolve "${host_name}:443:${host_ip}" "https://${host_name}/" || true
  echo
  echo "== SSH =="
  # BatchMode + IdentitiesOnly + no agent: never prompt, use only this key.
  ssh -o ConnectTimeout=12 -o BatchMode=yes -o IdentityAgent=none -o IdentitiesOnly=yes \
    -i "${ssh_identity}" "root@${host_ip}" echo ssh-ok || true
}
# Fetch Robot's metadata record for ${server}; password fields are redacted
# unless EVERY_CHANNEL_ROBOT_PRINT_SENSITIVE=1.
server_metadata() {
  load_robot_auth
  robot_curl GET "/server/${server}" | mask_sensitive_json
}
# Query Robot's reset status for ${server} (redacted output).
# Consistency fix: every sibling query helper loads credentials itself, but
# this one relied on the caller having run load_robot_auth first (only
# status() did). Load them here so the function is safe to call standalone.
reset_status() {
  load_robot_auth
  robot_curl GET "/reset/${server}" | mask_sensitive_json
}
# Query Robot's rescue-boot configuration for ${server} (redacted output).
rescue_status() {
  load_robot_auth
  robot_curl GET "/boot/${server}/rescue" | mask_sensitive_json
}
# Combined report: reset status followed by rescue-boot status.
# Credentials are loaded once up front before either query runs.
status() {
  load_robot_auth
  echo "== reset =="
  reset_status
  echo
  echo "== rescue =="
  robot_curl GET "/boot/${server}/rescue" | mask_sensitive_json
}
# Order a Linux rescue boot for ${server} via POST /boot/<id>/rescue.
# When a local key fingerprint can be derived it is registered as
# authorized_key[], so rescue login does not depend on the Robot-generated
# password (which is redacted from output by default anyway).
activate_rescue() {
  load_robot_auth
  local rescue_os="${EVERY_CHANNEL_ROBOT_RESCUE_OS:-linux}"
  local keyboard="${EVERY_CHANNEL_ROBOT_RESCUE_KEYBOARD:-us}"
  local key_fingerprint
  # "|| true" keeps set -e from aborting when no fingerprint is derivable.
  key_fingerprint="$(authorized_key_fingerprint || true)"
  local args=(--data-urlencode "os=${rescue_os}" --data-urlencode "keyboard=${keyboard}")
  if [[ -n "${key_fingerprint}" ]]; then
    args+=(--data-urlencode "authorized_key[]=${key_fingerprint}")
  fi
  robot_curl POST "/boot/${server}/rescue" "${args[@]}" | mask_sensitive_json
}
# Execute a Robot reset on ${server}. The type comes from $1 when given,
# else EVERY_CHANNEL_ROBOT_RESET_TYPE, else "hw" (hardware reset).
reset_server() {
  load_robot_auth
  local reset_type="${1:-${EVERY_CHANNEL_ROBOT_RESET_TYPE:-hw}}"
  robot_curl POST "/reset/${server}" --data-urlencode "type=${reset_type}" | mask_sensitive_json
}
# Recovery flow: activate rescue mode first, then reset, so the next boot
# lands in the rescue system. Optional arg selects the reset type.
recover() {
  local reset_type="${1:-${EVERY_CHANNEL_ROBOT_RESET_TYPE:-hw}}"
  echo "== activate rescue =="
  activate_rescue
  echo
  echo "== reset ${reset_type} =="
  reset_server "${reset_type}"
  echo
  echo "Rescue boot requested. Run '$0 wait-ssh' to watch for SSH on ${host_ip}."
}
# Poll until TCP/22 answers on ${host_ip} or the deadline elapses.
# Prefers nc for a pure port check; falls back to a batch-mode ssh attempt
# when nc is unavailable. Deadline in seconds comes from
# EVERY_CHANNEL_FORGE_WAIT_SECONDS (default 300). Polls every 5 seconds.
wait_ssh() {
  local deadline="${EVERY_CHANNEL_FORGE_WAIT_SECONDS:-300}"
  local started elapsed
  started="$(date +%s)"
  while :; do
    if command -v nc >/dev/null 2>&1; then
      if nc -z -w 5 "${host_ip}" 22 >/dev/null 2>&1; then
        echo "ssh port is reachable on ${host_ip}:22"
        return 0
      fi
    elif ssh -o ConnectTimeout=5 -o BatchMode=yes -o StrictHostKeyChecking=no \
      -o UserKnownHostsFile=/dev/null "root@${host_ip}" true >/dev/null 2>&1; then
      echo "ssh is reachable on ${host_ip}"
      return 0
    fi
    elapsed=$(( $(date +%s) - started ))
    if (( elapsed >= deadline )); then
      echo "error: timed out waiting for SSH on ${host_ip}:22" >&2
      return 1
    fi
    sleep 5
  done
}
# Command dispatch. Robot-webservice subcommands verify curl is present
# before doing anything; credential loading happens lazily inside handlers.
cmd="${1:-}"
case "${cmd}" in
  ""|-h|--help|help)
    usage
    ;;
  probe)
    # Best-effort reachability report; no hard tool requirement here.
    probe
    ;;
  server)
    require_cmd curl
    server_metadata
    ;;
  status)
    require_cmd curl
    status
    ;;
  rescue-status)
    require_cmd curl
    rescue_status
    ;;
  activate-rescue)
    require_cmd curl
    activate_rescue
    ;;
  reset)
    # Optional second arg selects the reset type (default: hw).
    require_cmd curl
    reset_server "${2:-}"
    ;;
  recover)
    require_cmd curl
    recover "${2:-}"
    ;;
  wait-ssh)
    wait_ssh
    ;;
  *)
    echo "error: unknown command: ${cmd}" >&2
    usage >&2
    exit 2
    ;;
esac

View file

@ -7,6 +7,8 @@ l1_rpc_url="${EVERY_CHANNEL_OP_STACK_L1_RPC_URL:-https://ethereum-sepolia-rpc.pu
l1_beacon_url="${EVERY_CHANNEL_OP_STACK_L1_BEACON_URL:-https://ethereum-sepolia-beacon-api.publicnode.com}" l1_beacon_url="${EVERY_CHANNEL_OP_STACK_L1_BEACON_URL:-https://ethereum-sepolia-beacon-api.publicnode.com}"
chain_id="${EVERY_CHANNEL_OP_STACK_CHAIN_ID:-245245}" chain_id="${EVERY_CHANNEL_OP_STACK_CHAIN_ID:-245245}"
p2p_advertise_ip="${EVERY_CHANNEL_OP_STACK_P2P_ADVERTISE_IP:-127.0.0.1}" p2p_advertise_ip="${EVERY_CHANNEL_OP_STACK_P2P_ADVERTISE_IP:-127.0.0.1}"
l2_rpc_url="${EVERY_CHANNEL_OP_STACK_L2_RPC_URL:-http://127.0.0.1:28545}"
rollup_rpc_url="${EVERY_CHANNEL_OP_STACK_ROLLUP_RPC_URL:-http://127.0.0.1:28547}"
op_deployer_bin="${EVERY_CHANNEL_OP_DEPLOYER_BIN:-${root}/bin/op-deployer}" op_deployer_bin="${EVERY_CHANNEL_OP_DEPLOYER_BIN:-${root}/bin/op-deployer}"
download_script="${EVERY_CHANNEL_OP_DEPLOYER_DOWNLOAD_SCRIPT:-}" download_script="${EVERY_CHANNEL_OP_DEPLOYER_DOWNLOAD_SCRIPT:-}"
download_tag="${EVERY_CHANNEL_OP_DEPLOYER_TAG:-op-deployer/v0.6.0-rc.3}" download_tag="${EVERY_CHANNEL_OP_DEPLOYER_TAG:-op-deployer/v0.6.0-rc.3}"
@ -36,14 +38,36 @@ trimmed_file_contents() {
tr -d '\r\n' <"$1" tr -d '\r\n' <"$1"
} }
normalize_rollup_config() {
local path="$1"
python - "$path" <<'PY'
import json
import sys
from pathlib import Path
path = Path(sys.argv[1])
data = json.loads(path.read_text())
system_config = data.setdefault("genesis", {}).setdefault("system_config", {})
system_config.pop("daFootprintGasScalar", None)
chain_op_config = data.get("chain_op_config", {})
denominator = chain_op_config.get("eip1559DenominatorCanyon") or chain_op_config.get("eip1559Denominator")
elasticity = chain_op_config.get("eip1559Elasticity")
if (
isinstance(denominator, int)
and isinstance(elasticity, int)
and system_config.get("eip1559Params") in (None, "0x", "0x0000000000000000")
):
system_config["eip1559Params"] = f"0x{denominator:08x}{elasticity:08x}"
path.write_text(json.dumps(data, indent=2, sort_keys=False) + "\n")
PY
}
set_toml_value() { set_toml_value() {
local key="$1" local key="$1"
local value="$2" local value="$2"
local file="$3" local file="$3"
if ! grep -q "^${key} = " "$file"; then
log "missing key ${key} in ${file}"
exit 1
fi
python - "$key" "$value" "$file" <<'PY' python - "$key" "$value" "$file" <<'PY'
import sys import sys
from pathlib import Path from pathlib import Path
@ -53,13 +77,15 @@ needle = f"{key} = "
out = [] out = []
replaced = False replaced = False
for line in text.splitlines(): for line in text.splitlines():
if line.startswith(needle): stripped = line.lstrip()
out.append(f'{key} = "{value}"') if stripped.startswith(needle):
indent = line[:len(line) - len(stripped)]
out.append(f'{indent}{key} = "{value}"')
replaced = True replaced = True
else: else:
out.append(line) out.append(line)
if not replaced: if not replaced:
raise SystemExit(f"failed to replace {key}") raise SystemExit(f"missing key {key} in {path}")
Path(path).write_text("\n".join(out) + "\n") Path(path).write_text("\n".join(out) + "\n")
PY PY
} }
@ -133,24 +159,33 @@ if 'fundDevAccounts = false' not in text:
path.write_text(text) path.write_text(text)
PY PY
if [[ "${skip_apply}" != "true" && ! -f "${deployer_dir}/.deployer/state.json" ]]; then state_json="${deployer_dir}/.deployer/state.json"
if [[ "${skip_apply}" != "true" ]]; then
if [[ ! -f "${state_json}" ]] || ! jq -e \
'.appliedIntent != null and .opChainDeployments != null' \
<"${state_json}" >/dev/null 2>&1
then
"$op_deployer_bin" apply \ "$op_deployer_bin" apply \
--workdir "${deployer_dir}/.deployer" \ --workdir "${deployer_dir}/.deployer" \
--l1-rpc-url "${l1_rpc_url}" \ --l1-rpc-url "${l1_rpc_url}" \
--private-key "${private_key}" --private-key "${private_key}"
fi
fi fi
if [[ ! -f "${deployer_dir}/.deployer/state.json" ]]; then if [[ ! -f "${state_json}" ]] || ! jq -e \
log "state.json missing; bootstrap did not complete" '.appliedIntent != null and .opChainDeployments != null' \
<"${state_json}" >/dev/null 2>&1
then
log "state.json missing or unapplied; bootstrap did not complete"
exit 1 exit 1
fi fi
"$op_deployer_bin" inspect genesis --workdir "${deployer_dir}/.deployer" "${chain_id_hex}" >"${sequencer_dir}/genesis.json" "$op_deployer_bin" inspect genesis --workdir "${deployer_dir}/.deployer" "${chain_id_hex}" >"${sequencer_dir}/genesis.json"
"$op_deployer_bin" inspect rollup --workdir "${deployer_dir}/.deployer" "${chain_id_hex}" >"${sequencer_dir}/rollup.json" "$op_deployer_bin" inspect rollup --workdir "${deployer_dir}/.deployer" "${chain_id_hex}" >"${sequencer_dir}/rollup.json"
normalize_rollup_config "${sequencer_dir}/rollup.json"
openssl rand -hex 32 >"${sequencer_dir}/jwt.txt" openssl rand -hex 32 >"${sequencer_dir}/jwt.txt"
chmod 0600 "${sequencer_dir}/jwt.txt" chmod 0600 "${sequencer_dir}/jwt.txt"
state_json="${deployer_dir}/.deployer/state.json"
system_config_proxy="$(jq -r '.opChainDeployments[0].systemConfigProxyAddress // .opChainDeployments[0].SystemConfigProxy // empty' <"${state_json}")" system_config_proxy="$(jq -r '.opChainDeployments[0].systemConfigProxyAddress // .opChainDeployments[0].SystemConfigProxy // empty' <"${state_json}")"
dispute_game_factory="$(jq -r '.opChainDeployments[0].disputeGameFactoryProxyAddress // .opChainDeployments[0].DisputeGameFactoryProxy // empty' <"${state_json}")" dispute_game_factory="$(jq -r '.opChainDeployments[0].disputeGameFactoryProxyAddress // .opChainDeployments[0].DisputeGameFactoryProxy // empty' <"${state_json}")"
l1_standard_bridge="$(jq -r '.opChainDeployments[0].l1StandardBridgeProxyAddress // .opChainDeployments[0].L1StandardBridgeProxy // empty' <"${state_json}")" l1_standard_bridge="$(jq -r '.opChainDeployments[0].l1StandardBridgeProxyAddress // .opChainDeployments[0].L1StandardBridgeProxy // empty' <"${state_json}")"
@ -184,15 +219,14 @@ EOF
cat > "${batcher_dir}/.env" <<EOF cat > "${batcher_dir}/.env" <<EOF
L1_RPC_URL=${l1_rpc_url} L1_RPC_URL=${l1_rpc_url}
L2_RPC_URL=http://127.0.0.1:8545 L2_RPC_URL=${l2_rpc_url}
ROLLUP_RPC_URL=http://127.0.0.1:8547 ROLLUP_RPC_URL=${rollup_rpc_url}
PRIVATE_KEY=${private_key} PRIVATE_KEY=${private_key}
BATCH_INBOX_ADDRESS=${system_config_proxy}
EOF EOF
cat > "${proposer_dir}/.env" <<EOF cat > "${proposer_dir}/.env" <<EOF
L1_RPC_URL=${l1_rpc_url} L1_RPC_URL=${l1_rpc_url}
ROLLUP_RPC_URL=http://127.0.0.1:8547 ROLLUP_RPC_URL=${rollup_rpc_url}
GAME_FACTORY_ADDRESS=${dispute_game_factory} GAME_FACTORY_ADDRESS=${dispute_game_factory}
PRIVATE_KEY=${private_key} PRIVATE_KEY=${private_key}
PROPOSAL_INTERVAL=3600s PROPOSAL_INTERVAL=3600s
@ -203,8 +237,8 @@ cp "${sequencer_dir}/rollup.json" "${challenger_dir}/rollup.json"
cat > "${challenger_dir}/.env" <<EOF cat > "${challenger_dir}/.env" <<EOF
L1_RPC_URL=${l1_rpc_url} L1_RPC_URL=${l1_rpc_url}
L1_BEACON_URL=${l1_beacon_url} L1_BEACON_URL=${l1_beacon_url}
L2_RPC_URL=http://127.0.0.1:8545 L2_RPC_URL=${l2_rpc_url}
ROLLUP_RPC_URL=http://127.0.0.1:8547 ROLLUP_RPC_URL=${rollup_rpc_url}
GAME_FACTORY_ADDRESS=${dispute_game_factory} GAME_FACTORY_ADDRESS=${dispute_game_factory}
PRIVATE_KEY=${private_key} PRIVATE_KEY=${private_key}
EOF EOF
@ -217,7 +251,7 @@ fi
cat > "${dispute_mon_dir}/.env" <<EOF cat > "${dispute_mon_dir}/.env" <<EOF
OP_DISPUTE_MON_L1_ETH_RPC=${l1_rpc_url} OP_DISPUTE_MON_L1_ETH_RPC=${l1_rpc_url}
OP_DISPUTE_MON_ROLLUP_RPC=http://127.0.0.1:8547 OP_DISPUTE_MON_ROLLUP_RPC=${rollup_rpc_url}
OP_DISPUTE_MON_HONEST_ACTORS=${operator_address} OP_DISPUTE_MON_HONEST_ACTORS=${operator_address}
OP_DISPUTE_MON_GAME_FACTORY_ADDRESS=${dispute_game_factory} OP_DISPUTE_MON_GAME_FACTORY_ADDRESS=${dispute_game_factory}
OP_DISPUTE_MON_MONITOR_INTERVAL=10s OP_DISPUTE_MON_MONITOR_INTERVAL=10s