Wire HDHomeRun observations and recover Forge OP Stack

This commit is contained in:
every.channel 2026-05-03 20:24:04 -07:00
parent 8065860449
commit 0d86104762
No known key found for this signature in database
18 changed files with 1613 additions and 58 deletions

View file

@ -17,6 +17,8 @@ pub const SCHEME_MANIFEST_DATA_ROOT: &str = "manifest-data-merkle-keccak256-v1";
/// Commitment-scheme identifier for the keccak256 of the ABI-encoded manifest body.
pub const SCHEME_MANIFEST_BODY_ABI: &str = "manifest-body-abi-keccak256-v1";
/// Commitment-scheme identifier for the keccak256 of the ABI-encoded manifest envelope.
pub const SCHEME_MANIFEST_ENVELOPE_ABI: &str = "manifest-envelope-abi-keccak256-v1";
/// Signature-algorithm identifier for secp256k1 EIP-712 manifest-body signatures.
pub const ETH_MANIFEST_SIG_ALG: &str = "secp256k1-eip712-manifest-body-v1";
/// 0x-prefixed hex form of an all-zero 32-byte word; used as the parent hash
/// for an observation chain's first entry.
pub const ZERO_B256_HEX: &str =
    "0x0000000000000000000000000000000000000000000000000000000000000000";
sol! {
struct EthStreamMetadata {
@ -117,6 +119,16 @@ sol! {
bytes32 manifestId;
EthManifestSignature[] signatures;
}
/// ABI header for one on-chain observation of a manifest.
struct EthObservationHeader {
    /// keccak256 of the stream id bytes.
    bytes32 streamHash;
    /// keccak256 of the epoch id bytes.
    bytes32 epochHash;
    /// Hash of the previous observation header, or all-zero for the first one.
    bytes32 parentObservationHash;
    /// The manifest's data-root commitment digest.
    bytes32 dataRoot;
    /// keccak256 of the serialized locator describing where the data lives.
    bytes32 locatorHash;
    /// Wall-clock timestamp (Unix milliseconds) taken from the manifest body.
    uint64 observedUnixMs;
    /// Monotonic sequence number chosen by the submitter.
    uint64 sequence;
}
}
#[derive(Debug, Clone)]
@ -142,7 +154,7 @@ fn commitment(scheme: &str, digest: B256) -> ChainCommitment {
ChainCommitment {
chain: ETHEREUM_CHAIN.to_string(),
scheme: scheme.to_string(),
digest: format!("0x{}", hex::encode(digest)),
digest: b256_hex(digest),
}
}
@ -150,7 +162,15 @@ fn abi_commitment<T: SolValue>(scheme: &str, value: &T) -> ChainCommitment {
commitment(scheme, keccak256(value.abi_encode()))
}
fn parse_b256(value: &str) -> Result<B256, EthCommitmentError> {
/// Renders a 32-byte word as a lowercase, `0x`-prefixed hex string.
pub fn b256_hex(value: B256) -> String {
    let digits = hex::encode(value);
    format!("0x{digits}")
}
/// Hashes arbitrary bytes with keccak256 and returns the digest as `0x`-prefixed hex.
pub fn keccak256_bytes_hex(value: &[u8]) -> String {
    let digest = keccak256(value);
    b256_hex(digest)
}
pub fn parse_b256(value: &str) -> Result<B256, EthCommitmentError> {
let trimmed = value.trim().strip_prefix("0x").unwrap_or(value.trim());
let bytes =
hex::decode(trimmed).map_err(|_| EthCommitmentError::InvalidHex(value.to_string()))?;
@ -540,6 +560,49 @@ pub fn manifest_commitments(value: &Manifest) -> Result<Vec<ChainCommitment>, Et
])
}
/// Looks up the digest of the first commitment on `value` whose scheme matches
/// `scheme`.
///
/// Returns `Ok(None)` when the manifest carries no commitment for that scheme.
///
/// # Errors
/// Propagates any error from recomputing the manifest's commitments.
pub fn manifest_commitment_digest(
    value: &Manifest,
    scheme: &str,
) -> Result<Option<String>, EthCommitmentError> {
    for commitment in manifest_commitments(value)? {
        if commitment.scheme == scheme {
            return Ok(Some(commitment.digest));
        }
    }
    Ok(None)
}
/// Derives the on-chain observation header for a manifest.
///
/// The stream and epoch hashes are keccak256 of their respective id bytes; the
/// data root comes from the manifest's data-root commitment; the parent hash
/// defaults to the all-zero word when no parent is supplied.
///
/// # Errors
/// Returns `EthCommitmentError::Empty` when the manifest has no data-root
/// commitment, or a parse error when any supplied hash is not valid 32-byte hex.
pub fn manifest_observation_header(
    value: &Manifest,
    parent_observation_hash: Option<&str>,
    locator_hash: &str,
    sequence: u64,
) -> Result<EthObservationHeader, EthCommitmentError> {
    let data_root = match manifest_commitment_digest(value, SCHEME_MANIFEST_DATA_ROOT)? {
        Some(digest) => digest,
        None => return Err(EthCommitmentError::Empty),
    };
    let parent = parent_observation_hash.unwrap_or(ZERO_B256_HEX);
    let body = &value.body;
    Ok(EthObservationHeader {
        streamHash: keccak256(body.stream_id.0.as_bytes()),
        epochHash: keccak256(body.epoch_id.as_bytes()),
        parentObservationHash: parse_b256(parent)?,
        dataRoot: parse_b256(&data_root)?,
        locatorHash: parse_b256(locator_hash)?,
        observedUnixMs: body.created_unix_ms,
        sequence,
    })
}
/// Computes the canonical hash of an observation header: keccak256 of its ABI encoding.
pub fn observation_header_hash(value: &EthObservationHeader) -> String {
    let encoded = value.abi_encode();
    b256_hex(keccak256(encoded))
}
/// Computes the slot hash for a (stream, epoch) pair: keccak256 of the
/// ABI-encoded tuple of the two 32-byte hashes.
///
/// # Errors
/// Fails when either input is not a valid `0x`-prefixed 32-byte hex string.
pub fn observation_slot_hash(
    stream_hash: &str,
    epoch_hash: &str,
) -> Result<String, EthCommitmentError> {
    let pair = (parse_b256(stream_hash)?, parse_b256(epoch_hash)?);
    let digest = keccak256(pair.abi_encode());
    Ok(b256_hex(digest))
}
pub fn manifest_commitments_match(value: &Manifest) -> Result<bool, EthCommitmentError> {
let present = value
.commitments
@ -621,6 +684,37 @@ mod tests {
assert_eq!(h1, h2);
}
#[test]
fn manifest_observation_header_uses_manifest_data_root() {
    // Build a minimal manifest and attach its own computed commitments so the
    // header derivation below has a data-root commitment to find.
    let body = sample_body();
    let manifest_id = body.manifest_id().unwrap();
    let mut manifest = Manifest {
        body,
        manifest_id,
        signatures: Vec::new(),
        commitments: Vec::new(),
    };
    manifest.commitments = manifest_commitments(&manifest).unwrap();
    let locator_hash = keccak256_bytes_hex(b"locator");
    let header = manifest_observation_header(&manifest, None, &locator_hash, 7).unwrap();
    // Sequence and timestamp are taken verbatim from the inputs.
    assert_eq!(header.sequence, 7);
    assert_eq!(header.observedUnixMs, manifest.body.created_unix_ms);
    // The header's data root must be exactly the manifest's data-root commitment digest.
    assert_eq!(
        b256_hex(header.dataRoot),
        manifest_commitment_digest(&manifest, SCHEME_MANIFEST_DATA_ROOT)
            .unwrap()
            .unwrap()
    );
    // The slot-hash helper must agree with hashing the (stream, epoch) pair directly.
    assert_eq!(
        observation_slot_hash(&b256_hex(header.streamHash), &b256_hex(header.epochHash))
            .unwrap(),
        b256_hex(keccak256(
            (header.streamHash, header.epochHash).abi_encode()
        ))
    );
}
#[test]
fn stream_descriptor_commitments_include_stream_id_and_descriptor_hashes() {
let descriptor = StreamDescriptor {

View file

@ -0,0 +1,220 @@
use anyhow::{anyhow, Context, Result};
use ec_core::Manifest;
use ec_eth::{
b256_hex, keccak256_bytes_hex, manifest_observation_header, observation_header_hash,
observation_slot_hash,
};
use serde::Serialize;
use std::fs;
use std::path::PathBuf;
use std::time::Duration;
use tokio::process::Command;
/// Env fallback for the JSON-RPC endpoint used to submit observations.
pub const OBSERVATION_RPC_URL_ENV: &str = "EVERY_CHANNEL_OBSERVATION_RPC_URL";
/// Env fallback for the deployed observation-ledger contract address.
pub const OBSERVATION_LEDGER_ENV: &str = "EVERY_CHANNEL_OBSERVATION_LEDGER";
/// Env fallback for the witness private key (hex).
pub const OBSERVATION_PRIVATE_KEY_ENV: &str = "EVERY_CHANNEL_OBSERVATION_PRIVATE_KEY";
/// Env fallback for a file containing the witness private key.
pub const OBSERVATION_PRIVATE_KEY_FILE_ENV: &str = "EVERY_CHANNEL_OBSERVATION_PRIVATE_KEY_FILE";
/// Env fallback for the parent observation hash placed into new headers.
pub const OBSERVATION_PARENT_HASH_ENV: &str = "EVERY_CHANNEL_OBSERVATION_PARENT_HASH";
/// CLI-sourced configuration for building an [`ObservationSink`]; any `None`
/// field falls back to its corresponding environment variable.
#[derive(Debug, Clone)]
pub struct ObservationSinkOptions {
    // JSON-RPC endpoint for transaction submission.
    pub rpc_url: Option<String>,
    // Deployed observation-ledger contract address.
    pub ledger: Option<String>,
    // Witness private key as hex (with or without 0x prefix).
    pub private_key: Option<String>,
    // Alternative to `private_key`: path of a file holding the key.
    pub private_key_file: Option<PathBuf>,
    // Parent observation hash to embed in new headers.
    pub parent_hash: Option<String>,
    // Per-transaction timeout; clamped to at least 1000ms by `from_options`.
    pub timeout_ms: u64,
}
/// Fully-resolved configuration for submitting manifest observations to the
/// on-chain ledger via `cast send`.
#[derive(Debug, Clone)]
pub struct ObservationSink {
    rpc_url: String,
    ledger: String,
    // Normalized to 0x-prefixed 32-byte hex by construction.
    private_key: String,
    // Optional fixed parent hash; `None` means the zero hash is used.
    parent_hash: Option<String>,
    timeout: Duration,
}
/// Where a manifest can be fetched; serialized to JSON and committed on chain
/// as the observation's locator hash.
#[derive(Debug, Clone, Serialize)]
pub struct ManifestObservationLocator {
    // Transport identifier, e.g. "moq-publish".
    pub transport: String,
    pub broadcast_name: String,
    pub track_name: String,
    pub manifest_track: String,
    pub stream_id: String,
    pub epoch_id: String,
    pub manifest_id: String,
}
/// Result of a successful observation submission: the hashes identifying the
/// observation on chain, all as 0x-prefixed hex strings.
#[derive(Debug, Clone)]
pub struct SubmittedObservation {
    // keccak256 of the ABI-encoded observation header.
    pub observation_hash: String,
    // keccak256 of the (stream_hash, epoch_hash) pair.
    pub slot_hash: String,
    pub stream_hash: String,
    pub epoch_hash: String,
    pub data_root: String,
    pub locator_hash: String,
    // Sequence used in the header (the manifest's chunk start index).
    pub sequence: u64,
}
/// Reads an environment variable, trimming whitespace; unset, unreadable, or
/// whitespace-only values all yield `None`.
fn env_value(name: &str) -> Option<String> {
    let raw = std::env::var(name).ok()?;
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    }
}
/// Normalizes a hex private key to canonical `0x`-prefixed form.
///
/// Accepts the key with or without a `0x` prefix and with surrounding
/// whitespace; rejects non-hex input and anything that is not exactly 32 bytes.
fn normalize_private_key(value: &str) -> Result<String> {
    let trimmed = value.trim();
    let trimmed = trimmed.strip_prefix("0x").unwrap_or(trimmed);
    let bytes = hex::decode(trimmed).context("observation private key must be hex")?;
    match bytes.len() {
        32 => Ok(format!("0x{}", hex::encode(bytes))),
        _ => Err(anyhow!("observation private key must be 32 bytes")),
    }
}
/// Reads a private key from `path` and normalizes it via
/// `normalize_private_key`.
fn read_private_key_file(path: &PathBuf) -> Result<String> {
    let contents = fs::read_to_string(path)
        .with_context(|| format!("failed to read observation private key {}", path.display()))?;
    normalize_private_key(&contents)
}
impl ObservationSink {
    /// Builds an [`ObservationSink`] from CLI options with environment-variable
    /// fallbacks.
    ///
    /// Returns `Ok(None)` when none of the rpc-url / ledger / private-key
    /// options is present at all — observation submission is opt-in. (A parent
    /// hash alone does not enable submission.) Once any enabling option is set,
    /// the remaining required options must also be provided, otherwise an
    /// error naming the missing flag/env var is returned.
    pub fn from_options(options: ObservationSinkOptions) -> Result<Option<Self>> {
        // Explicit CLI values win; environment variables are the fallback.
        let rpc_url = options
            .rpc_url
            .or_else(|| env_value(OBSERVATION_RPC_URL_ENV));
        let ledger = options.ledger.or_else(|| env_value(OBSERVATION_LEDGER_ENV));
        let private_key = options
            .private_key
            .or_else(|| env_value(OBSERVATION_PRIVATE_KEY_ENV));
        let private_key_file = options
            .private_key_file
            .or_else(|| env_value(OBSERVATION_PRIVATE_KEY_FILE_ENV).map(PathBuf::from));
        let parent_hash = options
            .parent_hash
            .or_else(|| env_value(OBSERVATION_PARENT_HASH_ENV));
        // With no enabling option at all, quietly disable instead of erroring.
        if rpc_url.is_none()
            && ledger.is_none()
            && private_key.is_none()
            && private_key_file.is_none()
        {
            return Ok(None);
        }
        let rpc_url = rpc_url.ok_or_else(|| {
            anyhow!(
                "set --observation-rpc-url or {} for observation submission",
                OBSERVATION_RPC_URL_ENV
            )
        })?;
        let ledger = ledger.ok_or_else(|| {
            anyhow!(
                "set --observation-ledger or {} for observation submission",
                OBSERVATION_LEDGER_ENV
            )
        })?;
        // An inline key takes precedence over a key file.
        let private_key = match (private_key, private_key_file) {
            (Some(value), _) => normalize_private_key(&value)?,
            (None, Some(path)) => read_private_key_file(&path)?,
            (None, None) => {
                return Err(anyhow!(
                    "set --observation-private-key, --observation-private-key-file, {}, or {}",
                    OBSERVATION_PRIVATE_KEY_ENV,
                    OBSERVATION_PRIVATE_KEY_FILE_ENV
                ))
            }
        };
        Ok(Some(Self {
            rpc_url,
            ledger,
            private_key,
            parent_hash,
            // Clamp to at least one second so a misconfigured tiny timeout
            // cannot make every submission fail instantly.
            timeout: Duration::from_millis(options.timeout_ms.max(1_000)),
        }))
    }

    /// Derives the observation header for `manifest`, submits it to the ledger
    /// via `cast send`, and returns the hashes identifying the observation.
    ///
    /// The locator is serialized to JSON and committed as keccak256 of those
    /// bytes; the manifest's chunk start index is used as the sequence number.
    ///
    /// # Errors
    /// Fails when the header cannot be derived, `cast` cannot be spawned,
    /// the transaction times out, or `cast` exits non-zero.
    pub async fn submit_manifest(
        &self,
        manifest: &Manifest,
        locator: ManifestObservationLocator,
    ) -> Result<SubmittedObservation> {
        let locator_json =
            serde_json::to_string(&locator).context("failed to encode observation locator")?;
        let locator_hash = keccak256_bytes_hex(locator_json.as_bytes());
        let sequence = manifest.body.chunk_start_index;
        let header = manifest_observation_header(
            manifest,
            self.parent_hash.as_deref(),
            &locator_hash,
            sequence,
        )
        .map_err(|err| anyhow!(err))?;
        let stream_hash = b256_hex(header.streamHash);
        let epoch_hash = b256_hex(header.epochHash);
        let data_root = b256_hex(header.dataRoot);
        let observation_hash = observation_header_hash(&header);
        let slot_hash =
            observation_slot_hash(&stream_hash, &epoch_hash).map_err(|err| anyhow!(err))?;
        // cast expects the struct argument as a parenthesized tuple literal.
        let tuple = format!(
            "({},{},{},{},{},{},{})",
            stream_hash,
            epoch_hash,
            b256_hex(header.parentObservationHash),
            data_root,
            locator_hash,
            header.observedUnixMs,
            header.sequence
        );
        // NOTE(review): the private key is passed on argv and is visible in the
        // process list while cast runs; consider a key file if that matters.
        let mut cmd = Command::new("cast");
        cmd.arg("send")
            .arg(&self.ledger)
            .arg("proposeObservation((bytes32,bytes32,bytes32,bytes32,bytes32,uint64,uint64))")
            .arg(&tuple)
            .arg("--rpc-url")
            .arg(&self.rpc_url)
            .arg("--private-key")
            .arg(&self.private_key)
            // Fix: without kill_on_drop, dropping the output() future when the
            // timeout below fires would leave the cast process running detached.
            .kill_on_drop(true);
        let output = tokio::time::timeout(self.timeout, cmd.output())
            .await
            .context("timed out submitting observation")?
            .context("failed to run cast for observation submission")?;
        if !output.status.success() {
            return Err(anyhow!(
                "cast observation submission failed: {}",
                String::from_utf8_lossy(&output.stderr).trim()
            ));
        }
        Ok(SubmittedObservation {
            observation_hash,
            slot_hash,
            stream_hash,
            epoch_hash,
            data_root,
            locator_hash,
            sequence,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Keys must round-trip to canonical 0x-prefixed form whether or not the
    // input carries the prefix, and wrong-length hex must be rejected.
    #[test]
    fn normalize_private_key_accepts_prefixed_and_plain_hex() {
        let plain = "11".repeat(32);
        assert_eq!(normalize_private_key(&plain).unwrap(), format!("0x{plain}"));
        assert_eq!(
            normalize_private_key(&format!("0x{plain}")).unwrap(),
            format!("0x{plain}")
        );
        // "abcd" is valid hex but only 2 bytes, so it is rejected.
        assert!(normalize_private_key("abcd").is_err());
    }
}

View file

@ -1,10 +1,12 @@
//! Node runner: orchestrates ingest, chunking, and MoQ publication.
mod blockchain;
mod nbc;
mod source;
use anyhow::{anyhow, Context, Result};
use blake3;
use blockchain::{ManifestObservationLocator, ObservationSink, ObservationSinkOptions};
use clap::ValueEnum;
use clap::{Parser, Subcommand};
use ec_chopper::{build_manifest_body_for_chunks, TsChunk};
@ -216,6 +218,24 @@ struct MoqPublishArgs {
/// Publish a CMAF ladder (multiple quality variants) using x264/AAC.
#[arg(long, value_enum)]
cmaf_ladder: Option<CmafLadderPreset>,
/// RPC URL for submitting manifest-derived observation headers.
#[arg(long)]
observation_rpc_url: Option<String>,
/// Deployed EveryChannelObservationLedger address.
#[arg(long)]
observation_ledger: Option<String>,
/// Witness private key for observation transactions (hex). Prefer --observation-private-key-file for services.
#[arg(long)]
observation_private_key: Option<String>,
/// File containing the witness private key for observation transactions.
#[arg(long)]
observation_private_key_file: Option<PathBuf>,
/// Parent observation hash to put into new headers.
#[arg(long)]
observation_parent_hash: Option<String>,
/// Maximum time to wait for each observation transaction.
#[arg(long, default_value_t = 30000)]
observation_timeout_ms: u64,
#[command(subcommand)]
source: IngestSource,
}
@ -1224,6 +1244,40 @@ fn build_manifest(
Ok(manifest)
}
/// Submits an observation for a freshly-published manifest, if a sink is
/// configured; a `None` sink makes this a no-op.
///
/// On success, logs the submitted observation's hashes to stderr (the e2e
/// harness parses this line).
async fn submit_manifest_observation(
    sink: Option<&ObservationSink>,
    manifest: &Manifest,
    broadcast_name: &str,
    track_name: &str,
    manifest_track: &str,
) -> Result<()> {
    let sink = match sink {
        Some(sink) => sink,
        None => return Ok(()),
    };
    let body = &manifest.body;
    let locator = ManifestObservationLocator {
        transport: "moq-publish".to_string(),
        broadcast_name: broadcast_name.to_string(),
        track_name: track_name.to_string(),
        manifest_track: manifest_track.to_string(),
        stream_id: body.stream_id.0.clone(),
        epoch_id: body.epoch_id.clone(),
        manifest_id: manifest.manifest_id.clone(),
    };
    let submitted = sink.submit_manifest(manifest, locator).await?;
    eprintln!(
        "observation submitted: observation_hash={} slot_hash={} stream_hash={} epoch_hash={} data_root={} locator_hash={} sequence={}",
        submitted.observation_hash,
        submitted.slot_hash,
        submitted.stream_hash,
        submitted.epoch_hash,
        submitted.data_root,
        submitted.locator_hash,
        submitted.sequence
    );
    Ok(())
}
#[derive(Debug, Clone)]
struct CmafVariantSpec {
id: String,
@ -1626,9 +1680,9 @@ fn flush_epoch_publish(
object_sequence: &mut u64,
manifest_sequence: &mut u64,
announce_tx: Option<&tokio::sync::mpsc::UnboundedSender<ManifestSummary>>,
) -> Result<()> {
) -> Result<Option<Manifest>> {
if epoch_buffer.is_empty() {
return Ok(());
return Ok(None);
}
let (chunks, datas, hashes) = epoch_buffer.take();
@ -1642,6 +1696,7 @@ fn flush_epoch_publish(
.as_millis() as u64;
let mut manifest_id = None;
let mut published_manifest = None;
if publish_manifests {
let manifest = build_manifest(
stream_id_value.clone(),
@ -1662,6 +1717,7 @@ fn flush_epoch_publish(
if let Some(tx) = announce_tx {
let _ = tx.send(manifest.summary());
}
published_manifest = Some(manifest);
}
// Compute per-chunk Merkle proofs so subscribers can validate membership
@ -1703,7 +1759,7 @@ fn flush_epoch_publish(
}
}
Ok(())
Ok(published_manifest)
}
#[cfg(test)]
@ -1991,6 +2047,15 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
fs::create_dir_all(&args.chunk_dir)
.with_context(|| format!("failed to create {}", args.chunk_dir.display()))?;
let observation_sink = ObservationSink::from_options(ObservationSinkOptions {
rpc_url: args.observation_rpc_url.clone(),
ledger: args.observation_ledger.clone(),
private_key: args.observation_private_key.clone(),
private_key_file: args.observation_private_key_file.clone(),
parent_hash: args.observation_parent_hash.clone(),
timeout_ms: args.observation_timeout_ms,
})?;
let deterministic = deterministic_enabled(args.deterministic);
let (source, _needs_transcode): (Box<dyn StreamSource + Send>, bool) = match args.source {
IngestSource::Hls { url, mut mode } => {
@ -2451,6 +2516,14 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
manifest,
} => {
publish_set.publish_manifest(&track, sequence, &manifest)?;
submit_manifest_observation(
observation_sink.as_ref(),
&manifest,
&broadcast_name,
&track_name,
&track,
)
.await?;
}
}
}
@ -2673,7 +2746,7 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
PendingKind::Segment => {
epoch_buffer.push(pending.chunk, pending.data, pending.hash);
if epoch_buffer.is_full() {
flush_epoch_publish(
if let Some(manifest) = flush_epoch_publish(
&mut publish_set,
&track_name,
&args.manifest_track,
@ -2690,7 +2763,16 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
&mut object_sequence,
&mut manifest_sequence,
announce_tx.as_ref(),
)?;
)? {
submit_manifest_observation(
observation_sink.as_ref(),
&manifest,
&broadcast_name,
&track_name,
&args.manifest_track,
)
.await?;
}
}
}
}
@ -2701,7 +2783,7 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
.await
.map_err(|err| anyhow!("chunk task join error: {err}"))??;
flush_epoch_publish(
if let Some(manifest) = flush_epoch_publish(
&mut publish_set,
&track_name,
&args.manifest_track,
@ -2718,7 +2800,16 @@ async fn moq_publish(args: MoqPublishArgs) -> Result<()> {
&mut object_sequence,
&mut manifest_sequence,
announce_tx.as_ref(),
)?;
)? {
submit_manifest_observation(
observation_sink.as_ref(),
&manifest,
&broadcast_name,
&track_name,
&args.manifest_track,
)
.await?;
}
Ok(())
}

View file

@ -0,0 +1,275 @@
use std::fs;
use std::io::{BufRead, BufReader};
use std::process::{Child, Command, Stdio};
use std::time::{Duration, Instant};
// Anvil's well-known developer private keys: account 0 deploys and owns the
// contracts, account 1 acts as the observation witness.
const ANVIL_PK0: &str = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
const ANVIL_PK1: &str = "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d";
/// Reads an environment variable, trimming whitespace; unset, unreadable, or
/// whitespace-only values all yield `None`.
fn env_required(key: &str) -> Option<String> {
    let raw = std::env::var(key).ok()?;
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    }
}
/// Heuristic test for DRM-related channel metadata: true when the value
/// (case-insensitively) contains any known protection marker.
///
/// Note the "copy" marker also matches unrelated words like "copyright" —
/// erring toward skipping a channel is acceptable here.
fn looks_drm(value: &str) -> bool {
    let lowered = value.to_lowercase();
    ["drm", "encrypted", "protected", "copy", "widevine"]
        .iter()
        .any(|marker| lowered.contains(marker))
}
/// Finds the first HDHomeRun device on the LAN and the first lineup entry that
/// does not look DRM-protected, returning `(device ip, channel number)`.
///
/// Returns `None` when discovery fails, no device is found, or every channel
/// looks protected / has a blank channel number.
fn autodiscover_hdhr_host_and_channel() -> Option<(String, String)> {
    let devices = ec_hdhomerun::discover().ok()?;
    let device = devices.into_iter().next()?;
    let lineup = ec_hdhomerun::fetch_lineup(&device).ok()?;
    let entry = lineup.iter().find(|e| {
        // Skip a channel when any lineup tag looks DRM-related...
        let tag_drm = e.tags.iter().any(|t| looks_drm(t));
        // ...or when any key/value in the raw lineup JSON does.
        let raw_drm = e
            .raw
            .as_object()
            .map(|obj| {
                obj.iter()
                    .any(|(k, v)| looks_drm(k) || looks_drm(&v.to_string()))
            })
            .unwrap_or(false);
        !tag_drm && !raw_drm && e.channel.number.as_deref().unwrap_or("").trim() != ""
    })?;
    let host = device.ip.clone();
    // Prefer the channel number, falling back to the channel name. (The old
    // `.or_else(|| Some(name)).unwrap_or_else(|| "2.1")` made the "2.1"
    // default unreachable; this is the same behavior without the dead branch.)
    let channel = entry
        .channel
        .number
        .clone()
        .unwrap_or_else(|| entry.channel.name.clone());
    Some((host, channel))
}
/// Locates the ec-node binary: an explicit `EC_NODE_BIN` override first, then
/// the two spellings Cargo uses for bin-target env vars, then a sibling of the
/// test executable under target/debug.
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    // Test binaries live in target/debug/deps; the bin is two levels up's child.
    let exe = std::env::current_exe().expect("current_exe");
    let debug_dir = exe
        .parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps");
    debug_dir.join("ec-node")
}
// Resolves the workspace root: two directory levels above this crate's
// manifest directory (compile-time CARGO_MANIFEST_DIR).
fn repo_root() -> std::path::PathBuf {
    std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
        .parent()
        .and_then(|p| p.parent())
        .expect("workspace root")
        .to_path_buf()
}
/// True when every external tool the e2e flow shells out to is on PATH.
fn require_tools() -> bool {
    let tools = ["anvil", "cast", "forge", "ffmpeg"];
    tools.iter().copied().all(|tool| which::which(tool).is_ok())
}
/// Polls the RPC endpoint with `cast block-number` every 250ms until it
/// answers, panicking if anvil is not ready within 20 seconds.
fn wait_for_anvil(rpc_url: &str) {
    let deadline = Instant::now() + Duration::from_secs(20);
    loop {
        if Instant::now() >= deadline {
            panic!("anvil did not become ready");
        }
        let ready = Command::new("cast")
            .arg("block-number")
            .arg("--rpc-url")
            .arg(rpc_url)
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status()
            .map(|status| status.success())
            .unwrap_or(false);
        if ready {
            return;
        }
        std::thread::sleep(Duration::from_millis(250));
    }
}
// Owns a spawned child process and kills it on drop, so a panicking assert
// does not leak anvil/publisher processes across test runs.
struct ChildGuard {
    child: Option<Child>,
}
impl ChildGuard {
    fn new(child: Child) -> Self {
        Self { child: Some(child) }
    }
}
impl Drop for ChildGuard {
    fn drop(&mut self) {
        if let Some(child) = self.child.as_mut() {
            // Best effort: errors are ignored, the process may already be gone.
            let _ = child.kill();
            let _ = child.wait();
        }
    }
}
/// Extracts `value` from the first whitespace-separated `field=value` token in
/// `line`; values containing whitespace are not supported.
fn parse_field(line: &str, field: &str) -> Option<String> {
    let prefix = format!("{field}=");
    for token in line.split_whitespace() {
        if let Some(rest) = token.strip_prefix(&prefix) {
            return Some(rest.to_string());
        }
    }
    None
}
// End-to-end flow: tune an HDHomeRun channel, publish one chunk with
// observation submission enabled against a local anvil chain, and verify the
// ledger finalized the submitted observation. Ignored by default; requires
// anvil/cast/forge/ffmpeg and a reachable tuner.
#[test]
#[ignore]
fn e2e_hdhr_manifest_observation_finalizes_on_anvil() {
    // Skip silently when the external tooling is not installed.
    if !require_tools() {
        return;
    }
    // Tuner host/channel from env, falling back to autodiscovery of a
    // DRM-free channel; skip when neither is available.
    let host = env_required("EVERY_CHANNEL_E2E_HDHR_HOST");
    let channel = env_required("EVERY_CHANNEL_E2E_HDHR_CHANNEL");
    let (host, channel) = match (host, channel) {
        (Some(host), Some(channel)) => (host, channel),
        _ => match autodiscover_hdhr_host_and_channel() {
            Some(v) => v,
            None => return,
        },
    };
    // The millisecond timestamp doubles as a uniqueness token for the temp
    // dir, RPC port, and broadcast name.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let tmp = std::env::temp_dir().join(format!("ec-e2e-hdhr-chain-{ts}"));
    fs::create_dir_all(&tmp).unwrap();
    // Quasi-unique port in [18545, 28544] to dodge concurrent anvil instances.
    let port = 18_545 + (ts % 10_000) as u16;
    let rpc_url = format!("http://127.0.0.1:{port}");
    let anvil_log = fs::File::create(tmp.join("anvil.log")).unwrap();
    // ChildGuard kills anvil even if an assert below panics.
    let _anvil = ChildGuard::new(
        Command::new("anvil")
            .arg("--port")
            .arg(port.to_string())
            .stdout(anvil_log)
            .stderr(Stdio::null())
            .spawn()
            .expect("failed to spawn anvil"),
    );
    wait_for_anvil(&rpc_url);
    // Deploy registry + ledger with quorum 1 using anvil account 0 as owner.
    let owner_file = tmp.join("owner.key");
    fs::write(&owner_file, ANVIL_PK0).unwrap();
    let deploy_json = tmp.join("observation-ledger-deploy.json");
    let deploy = Command::new(repo_root().join("scripts/op-stack/deploy-observation-ledger.sh"))
        .current_dir(repo_root())
        .env("EVERY_CHANNEL_RPC_URL", &rpc_url)
        .env("EVERY_CHANNEL_PRIVATE_KEY_FILE", &owner_file)
        .env("EVERY_CHANNEL_OBSERVATION_QUORUM", "1")
        .env("EVERY_CHANNEL_OBSERVATION_DEPLOY_OUT", &deploy_json)
        .stdout(Stdio::null())
        .stderr(Stdio::inherit())
        .status()
        .expect("failed to deploy observation ledger");
    assert!(deploy.success(), "deploy failed with {deploy}");
    // The deploy script writes the contract addresses as JSON.
    let deploy_value: serde_json::Value =
        serde_json::from_slice(&fs::read(&deploy_json).unwrap()).unwrap();
    let registry = deploy_value["registry"].as_str().unwrap();
    let ledger = deploy_value["ledger"].as_str().unwrap();
    // Register anvil account 1 as a witness so its observations count.
    let witness = Command::new("cast")
        .arg("wallet")
        .arg("address")
        .arg("--private-key")
        .arg(ANVIL_PK1)
        .output()
        .expect("failed to derive witness address");
    assert!(witness.status.success());
    let witness = String::from_utf8(witness.stdout).unwrap();
    let witness = witness.trim();
    let add_witness = Command::new("cast")
        .arg("send")
        .arg(registry)
        .arg("addWitness(address)")
        .arg(witness)
        .arg("--rpc-url")
        .arg(&rpc_url)
        .arg("--private-key")
        .arg(ANVIL_PK0)
        .stdout(Stdio::null())
        .stderr(Stdio::inherit())
        .status()
        .expect("failed to add witness");
    assert!(
        add_witness.success(),
        "add witness failed with {add_witness}"
    );
    // Publish exactly one chunk from the tuner with observation submission on.
    let ec_node = ec_node_path();
    let broadcast_name = format!("every.channel/e2e/blockchain/{ts}");
    let mut publisher = Command::new(&ec_node);
    publisher
        .env("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", "11".repeat(32))
        .arg("moq-publish")
        .arg("--publish-manifests")
        .arg("--epoch-chunks")
        .arg("1")
        .arg("--max-chunks")
        .arg("1")
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--broadcast-name")
        .arg(&broadcast_name)
        .arg("--observation-rpc-url")
        .arg(&rpc_url)
        .arg("--observation-ledger")
        .arg(ledger)
        .arg("--observation-private-key")
        .arg(ANVIL_PK1)
        .arg("--chunk-dir")
        .arg(tmp.join("chunks"))
        .arg("hdhr")
        .arg("--host")
        .arg(&host)
        .arg("--channel")
        .arg(&channel)
        .stdout(Stdio::null())
        .stderr(Stdio::piped());
    let mut child = publisher.spawn().expect("failed to spawn publisher");
    // Drain stderr to EOF before waiting so the pipe cannot fill and deadlock.
    let stderr = child.stderr.take().expect("publisher stderr missing");
    let lines = BufReader::new(stderr)
        .lines()
        .filter_map(|line| line.ok())
        .collect::<Vec<_>>();
    let status = child.wait().expect("failed to wait for publisher");
    assert!(
        status.success(),
        "publisher exited with {status}: {lines:?}"
    );
    // The publisher logs the submission on stderr; recover its hashes.
    let observation_line = lines
        .iter()
        .find(|line| line.starts_with("observation submitted:"))
        .expect("publisher did not submit an observation");
    let observation_hash = parse_field(observation_line, "observation_hash").unwrap();
    let slot_hash = parse_field(observation_line, "slot_hash").unwrap();
    // With quorum 1 (set at deploy time above) a single proposal should
    // finalize immediately; the ledger must report our observation hash.
    let finalized = Command::new("cast")
        .arg("call")
        .arg(ledger)
        .arg("finalizedObservationBySlot(bytes32)(bytes32)")
        .arg(slot_hash)
        .arg("--rpc-url")
        .arg(&rpc_url)
        .output()
        .expect("failed to read finalized slot");
    assert!(finalized.status.success());
    let finalized = String::from_utf8(finalized.stdout).unwrap();
    assert_eq!(finalized.trim(), observation_hash);
}