every.channel: sanitized baseline

This commit is contained in:
every.channel 2026-02-15 16:17:27 -05:00
commit 897e556bea
No known key found for this signature in database
258 changed files with 74298 additions and 0 deletions

View file

@ -0,0 +1,13 @@
[package]
name = "ec-chopper"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
ac-ffmpeg = "0.19.0"
anyhow.workspace = true
blake3.workspace = true
ec-core = { path = "../ec-core" }
ec-ts = { path = "../ec-ts" }
serde.workspace = true

View file

@ -0,0 +1,899 @@
//! Deterministic chunking and transcode scaffolding.
use ac_ffmpeg::format::{
demuxer::Demuxer,
io::IO,
muxer::{Muxer, OutputFormat},
};
use anyhow::{anyhow, Context, Result};
use ec_core::{
merkle_root_from_hashes, DeterminismProfile, ManifestBody, StreamId, StreamMetadata,
};
use ec_ts::{SectionAssembler, TimeSyncEngine, TimeSyncUpdate, TsReader};
use serde::{Deserialize, Serialize};
use std::fs;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Stdio};
use std::time::Duration;
/// Summary of a single elementary stream discovered by probing a container.
///
/// Optional fields are filled only when the codec kind provides them
/// (width/height for video, sample_rate/channels for audio).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamProbe {
    /// Zero-based stream index within the container.
    pub index: usize,
    /// Codec family: "video", "audio", "subtitle", or "data".
    pub kind: String,
    /// ffmpeg decoder name, when one is known.
    pub decoder: Option<String>,
    /// Frame width in pixels (video streams only).
    pub width: Option<usize>,
    /// Frame height in pixels (video streams only).
    pub height: Option<usize>,
    /// Sample rate (audio streams only).
    pub sample_rate: Option<u32>,
    /// Channel count (audio streams only).
    pub channels: Option<u32>,
}
/// Container formats supported for segment output.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ChunkFormat {
    /// Fragmented MP4 segments.
    Fmp4,
    /// MPEG transport stream segments.
    MpegTs,
    /// Matroska segments.
    Matroska,
}
/// Configuration for spawning an ffmpeg-based segmenter.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChunkerConfig {
    /// Directory segment files are written into (created if missing).
    pub output_dir: PathBuf,
    /// Target duration of each segment, in milliseconds.
    pub segment_duration_ms: u64,
    /// ffmpeg filename pattern for segments (e.g. "segment_%06d.m4s").
    pub segment_template: String,
    /// Output container format for the segments.
    pub format: ChunkFormat,
    /// Encoding profile controlling determinism-related ffmpeg flags.
    pub profile: DeterminismProfile,
}
impl ChunkerConfig {
    /// Default ffmpeg segment filename pattern: a zero-padded six-digit
    /// index with the fMP4 segment extension.
    pub fn default_segment_template() -> String {
        String::from("segment_%06d.m4s")
    }
}
/// One segment file produced by the ffmpeg CLI segmenter.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChunkSegment {
    /// Position of the segment in sorted-path order.
    pub index: usize,
    /// Path to the segment file.
    pub path: PathBuf,
}
/// All segments collected from a segmenter output directory.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChunkManifest {
    pub output_dir: PathBuf,
    pub segments: Vec<ChunkSegment>,
}
/// One MPEG-TS chunk written by the TS chunkers, with its timing info.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TsChunk {
    pub index: u64,
    pub path: PathBuf,
    pub timing: ChunkTiming,
}
/// A [`TsChunk`] augmented with its BLAKE3 content hash (hex).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HashedTsChunk {
    pub index: u64,
    pub path: PathBuf,
    pub timing: ChunkTiming,
    pub hash: String,
}
/// Hashed chunks plus the Merkle root computed over their hashes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HashedTsChunkManifest {
    pub output_dir: PathBuf,
    pub chunks: Vec<HashedTsChunk>,
    pub merkle_root: String,
}
/// Timing metadata recorded when a chunk is opened.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChunkTiming {
    pub chunk_index: u64,
    /// Chunk start on the 27 MHz MPEG system clock, when known.
    pub chunk_start_27mhz: Option<u64>,
    /// Nominal chunk duration in 27 MHz ticks (27_000 per millisecond).
    pub chunk_duration_27mhz: u64,
    /// UTC start time when derivable from the stream — NOTE(review): units
    /// (seconds vs ms) are not visible here; confirm against ec_ts.
    pub utc_start_unix: Option<i64>,
    /// "synced"/"unsynced" from the time-sync engine, or "pts" for
    /// PTS-derived chunking.
    pub sync_status: String,
}
/// All chunks produced by a TS chunking run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TsChunkManifest {
    pub output_dir: PathBuf,
    pub chunks: Vec<TsChunk>,
}
/// Source for the segmenter: a network URL or a local file.
#[derive(Debug, Clone)]
pub enum ChunkerInput {
    Url(String),
    File(PathBuf),
}
/// Handle to a running ffmpeg segmenter child process.
#[derive(Debug)]
pub struct SegmenterProcess {
    /// The spawned ffmpeg process; callers are expected to wait() on it.
    pub child: Child,
    /// Directory the child writes segments into.
    pub output_dir: PathBuf,
}
/// Segmenter that shells out to the ffmpeg CLI binary.
#[derive(Debug, Clone)]
pub struct FfmpegCliSegmenter {
    /// Path to the ffmpeg executable.
    pub ffmpeg_bin: PathBuf,
}
impl Default for FfmpegCliSegmenter {
fn default() -> Self {
Self {
ffmpeg_bin: PathBuf::from("ffmpeg"),
}
}
}
impl FfmpegCliSegmenter {
    /// Launch an ffmpeg child that segments `input` according to `config`,
    /// writing numbered segment files into `config.output_dir`.
    ///
    /// stdin/stdout are detached; stderr is inherited so ffmpeg diagnostics
    /// remain visible. Returns the running process handle — the caller is
    /// responsible for waiting on it.
    pub fn spawn(&self, input: ChunkerInput, config: &ChunkerConfig) -> Result<SegmenterProcess> {
        fs::create_dir_all(&config.output_dir)
            .with_context(|| format!("failed to create {}", config.output_dir.display()))?;
        let source = match input {
            ChunkerInput::Url(url) => url,
            ChunkerInput::File(path) => {
                let text = path.to_str().ok_or_else(|| anyhow!("invalid input path"))?;
                text.to_string()
            }
        };
        // ffmpeg takes seconds; keep millisecond precision via three decimals.
        let seconds = format!("{:.3}", config.segment_duration_ms as f64 / 1000.0);
        let template_path = config.output_dir.join(&config.segment_template);
        let template = template_path
            .to_str()
            .ok_or_else(|| anyhow!("invalid output template path"))?
            .to_string();
        let mut cmd = Command::new(&self.ffmpeg_bin);
        cmd.args(["-hide_banner", "-loglevel", "error", "-nostdin", "-y", "-i"])
            .arg(&source);
        // Encoder/profile flags must come after the input and before the
        // segment output options.
        cmd.args(ffmpeg_profile_args(&config.profile));
        cmd.args([
            "-f",
            "segment",
            "-segment_time",
            seconds.as_str(),
            "-reset_timestamps",
            "1",
            "-segment_format",
            segment_format_arg(&config.format),
        ])
        .arg(&template)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::inherit());
        let child = cmd
            .spawn()
            .with_context(|| "failed to spawn ffmpeg".to_string())?;
        Ok(SegmenterProcess {
            child,
            output_dir: config.output_dir.clone(),
        })
    }
}
pub fn collect_segments(output_dir: &Path) -> Result<ChunkManifest> {
let mut entries = fs::read_dir(output_dir)?
.filter_map(Result::ok)
.filter(|entry| entry.file_type().map(|t| t.is_file()).unwrap_or(false))
.map(|entry| entry.path())
.collect::<Vec<_>>();
entries.sort();
let segments = entries
.into_iter()
.enumerate()
.map(|(index, path)| ChunkSegment { index, path })
.collect();
Ok(ChunkManifest {
output_dir: output_dir.to_path_buf(),
segments,
})
}
/// Open `stream` with ffmpeg's demuxer and report one [`StreamProbe`] per
/// elementary stream found.
///
/// Stream-info discovery is capped at two seconds of probing so a live
/// input cannot stall the caller indefinitely.
pub fn probe_read_stream<T: Read>(stream: T) -> Result<Vec<StreamProbe>> {
    let io = IO::from_read_stream(stream);
    let demuxer = Demuxer::builder()
        .build(io)
        .map_err(|err| anyhow!(err.to_string()))?;
    // find_stream_info consumes and returns the demuxer; on failure it hands
    // back (demuxer, error) — only the error text is needed here.
    let demuxer = demuxer
        .find_stream_info(Some(Duration::from_secs(2)))
        .map_err(|(_, err)| anyhow!(err.to_string()))?;
    let mut probes = Vec::new();
    for (index, stream) in demuxer.streams().iter().enumerate() {
        let params = stream.codec_parameters();
        let mut probe = StreamProbe {
            index,
            // Classify by codec family; anything unrecognized is "data".
            kind: if params.is_video_codec() {
                "video".to_string()
            } else if params.is_audio_codec() {
                "audio".to_string()
            } else if params.is_subtitle_codec() {
                "subtitle".to_string()
            } else {
                "data".to_string()
            },
            decoder: params.decoder_name().map(|name| name.to_string()),
            width: None,
            height: None,
            sample_rate: None,
            channels: None,
        };
        // Fill in detail fields only where the codec type provides them.
        if let Some(video) = params.as_video_codec_parameters() {
            probe.width = Some(video.width());
            probe.height = Some(video.height());
        }
        if let Some(audio) = params.as_audio_codec_parameters() {
            probe.sample_rate = Some(audio.sample_rate());
            probe.channels = Some(audio.channel_layout().channels());
        }
        probes.push(probe);
    }
    Ok(probes)
}
/// Scan an MPEG-TS byte stream and collect up to `max_events` time-sync
/// updates, with chunk boundaries every `chunk_duration_ms`.
///
/// Stops as soon as the event budget is reached; otherwise runs to end of
/// stream.
pub fn analyze_ts_time<T: Read>(
    stream: T,
    chunk_duration_ms: u64,
    max_events: usize,
) -> Result<Vec<TimeSyncUpdate>> {
    let mut ts_reader = TsReader::new(stream);
    let mut sections = SectionAssembler::default();
    let mut sync = TimeSyncEngine::new(chunk_duration_ms);
    let mut collected = Vec::new();
    'scan: while let Some(pkt) = ts_reader.read_packet()? {
        for update in sync.ingest_packet(&pkt, &mut sections) {
            collected.push(update);
            if collected.len() >= max_events {
                break 'scan;
            }
        }
    }
    Ok(collected)
}
pub fn chunk_ts_stream<T: Read>(
stream: T,
output_dir: &Path,
chunk_duration_ms: u64,
max_chunks: Option<usize>,
) -> Result<TsChunkManifest> {
let mut chunks = Vec::new();
chunk_ts_stream_live(stream, output_dir, chunk_duration_ms, max_chunks, |chunk| {
chunks.push(chunk);
Ok(())
})?;
Ok(TsChunkManifest {
output_dir: output_dir.to_path_buf(),
chunks,
})
}
/// Chunk an MPEG-TS stream into fixed-duration files, invoking `on_chunk` as
/// each chunk file is completed.
///
/// Chunk boundaries come from [`TimeSyncEngine`]; a reported discontinuity
/// closes the current chunk early. Packets arriving before the first boundary
/// are discarded (no file is open yet). Stops once `max_chunks` chunks have
/// been emitted, when a limit is given.
///
/// # Errors
/// Fails on I/O errors reading the stream or writing chunk files, and
/// propagates any error returned by `on_chunk` — including for the final,
/// possibly short, chunk flushed at end of stream (previously that last
/// callback error was silently discarded, unlike `chunk_stream_ffmpeg_live`).
pub fn chunk_ts_stream_live<T: Read, F: FnMut(TsChunk) -> Result<()>>(
    stream: T,
    output_dir: &Path,
    chunk_duration_ms: u64,
    max_chunks: Option<usize>,
    mut on_chunk: F,
) -> Result<()> {
    fs::create_dir_all(output_dir)
        .with_context(|| format!("failed to create {}", output_dir.display()))?;
    let mut reader = TsReader::new(stream);
    let mut assembler = SectionAssembler::default();
    let mut engine = TimeSyncEngine::new(chunk_duration_ms);
    let mut current_index: Option<u64> = None;
    let mut current_file: Option<std::fs::File> = None;
    let mut current_timing: Option<ChunkTiming> = None;
    let mut emitted = 0usize;
    // Closes the chunk file, hands the chunk to the callback, and reports
    // whether the max_chunks limit has now been reached.
    let mut close_and_emit =
        |index: u64, timing: ChunkTiming, file: std::fs::File| -> Result<bool> {
            drop(file);
            let path = chunk_path(output_dir, index);
            on_chunk(TsChunk {
                index,
                path,
                timing,
            })?;
            emitted += 1;
            Ok(max_chunks.map(|limit| emitted >= limit).unwrap_or(false))
        };
    while let Some(packet) = reader.read_packet()? {
        let updates = engine.ingest_packet(&packet, &mut assembler);
        for update in updates {
            if update.discontinuity {
                // A timing discontinuity invalidates the open chunk: close it
                // now rather than mixing pre- and post-jump packets.
                if let (Some(index), Some(timing), Some(file)) = (
                    current_index.take(),
                    current_timing.take(),
                    current_file.take(),
                ) {
                    if close_and_emit(index, timing, file)? {
                        return Ok(());
                    }
                }
            }
            if let Some(index) = update.chunk_index {
                if current_index != Some(index) {
                    // Crossing a chunk boundary: flush the previous chunk...
                    if let (Some(prev_index), Some(timing), Some(file)) = (
                        current_index.take(),
                        current_timing.take(),
                        current_file.take(),
                    ) {
                        if close_and_emit(prev_index, timing, file)? {
                            return Ok(());
                        }
                    }
                    // ...then open the next one and record its timing.
                    let path = chunk_path(output_dir, index);
                    let file = std::fs::File::create(&path)
                        .with_context(|| format!("failed to create {}", path.display()))?;
                    current_file = Some(file);
                    current_index = Some(index);
                    current_timing = Some(ChunkTiming {
                        chunk_index: index,
                        chunk_start_27mhz: update.chunk_start_27mhz,
                        // Milliseconds -> 27 MHz ticks (27_000 ticks per ms).
                        chunk_duration_27mhz: chunk_duration_ms * 27_000,
                        utc_start_unix: update.utc_start_unix,
                        sync_status: if update.synced {
                            "synced".to_string()
                        } else {
                            "unsynced".to_string()
                        },
                    });
                }
            }
        }
        if let Some(file) = current_file.as_mut() {
            file.write_all(packet.as_bytes())?;
        }
    }
    // Flush the trailing chunk. Propagate callback errors here too; the limit
    // flag is irrelevant since the stream is already exhausted.
    if let (Some(index), Some(timing), Some(file)) = (
        current_index.take(),
        current_timing.take(),
        current_file.take(),
    ) {
        close_and_emit(index, timing, file)?;
    }
    Ok(())
}
/// Build the on-disk path for chunk `index`: `<output_dir>/chunk_<index>.ts`,
/// with the index zero-padded to ten digits so lexicographic order matches
/// numeric order.
fn chunk_path(output_dir: &Path, index: u64) -> PathBuf {
    let file_name = format!("chunk_{:010}.ts", index);
    output_dir.join(file_name)
}
/// Stream a file through BLAKE3 and return the lowercase hex digest.
///
/// Reads in 8 KiB slices so arbitrarily large files hash in constant memory.
pub fn hash_file_blake3(path: &Path) -> Result<String> {
    let mut file =
        fs::File::open(path).with_context(|| format!("failed to open {}", path.display()))?;
    let mut hasher = blake3::Hasher::new();
    let mut chunk = [0u8; 8192];
    loop {
        let n = file.read(&mut chunk)?;
        if n == 0 {
            // End of file: finish and render the digest as hex.
            break Ok(hasher.finalize().to_hex().to_string());
        }
        hasher.update(&chunk[..n]);
    }
}
/// Demux `stream` with ffmpeg and remux it into per-chunk MPEG-TS files of
/// roughly `chunk_duration_ms` each, splitting on packet PTS/DTS.
///
/// Chunk N covers timestamps `[N*d, (N+1)*d)` microseconds. Packets without a
/// usable (>= 0) timestamp stay in the current chunk. Stops after `max_chunks`
/// chunks when a limit is given.
///
/// Fixes relative to the previous version:
/// - `max_chunks` is now checked right after the previous chunk is recorded,
///   BEFORE a new chunk file/muxer is opened. The old placement broke after
///   opening the next chunk, so the tail flush pushed it too — emitting
///   `limit + 1` chunks and leaving a stray near-empty file. The `_live`
///   variant already checked at the correct point.
/// - `chunk_duration_ms == 0` now returns an error instead of panicking with
///   a division by zero.
///
/// # Errors
/// Fails when `chunk_duration_ms` is zero, on demuxer/muxer errors, and on
/// chunk-file I/O errors.
pub fn chunk_stream_ffmpeg<T: Read>(
    stream: T,
    output_dir: &Path,
    chunk_duration_ms: u64,
    max_chunks: Option<usize>,
) -> Result<TsChunkManifest> {
    if chunk_duration_ms == 0 {
        return Err(anyhow!("chunk_duration_ms must be non-zero"));
    }
    fs::create_dir_all(output_dir)
        .with_context(|| format!("failed to create {}", output_dir.display()))?;
    let io = IO::from_read_stream(stream);
    let demuxer = Demuxer::builder()
        .build(io)
        .map_err(|err| anyhow!(err.to_string()))?;
    let demuxer = demuxer
        .find_stream_info(Some(Duration::from_secs(2)))
        .map_err(|(_, err)| anyhow!(err.to_string()))?;
    // Snapshot codec parameters and time bases up front; each chunk's muxer
    // is rebuilt from this list.
    let stream_info = demuxer
        .streams()
        .iter()
        .map(|stream| (stream.codec_parameters(), stream.time_base()))
        .collect::<Vec<_>>();
    let mut demuxer = demuxer.into_demuxer();
    let chunk_duration_micros = chunk_duration_ms as i64 * 1000;
    let mut chunks = Vec::new();
    let mut current_index: Option<u64> = None;
    let mut current_muxer: Option<Muxer<std::fs::File>> = None;
    let mut current_timing: Option<ChunkTiming> = None;
    loop {
        let Some(packet) = demuxer.take().map_err(|err| anyhow!(err.to_string()))? else {
            break;
        };
        // Prefer PTS, fall back to DTS.
        let ts = packet
            .pts()
            .as_micros()
            .or_else(|| packet.dts().as_micros());
        let chunk_index = ts
            .and_then(|micros| {
                if micros < 0 {
                    None
                } else {
                    Some((micros / chunk_duration_micros) as u64)
                }
            })
            .or(current_index);
        if let Some(index) = chunk_index {
            if current_index != Some(index) {
                // Close out the previous chunk before opening the next one.
                if let Some(mut muxer) = current_muxer.take() {
                    muxer.flush().map_err(|err| anyhow!(err.to_string()))?;
                    let _ = muxer.close();
                }
                if let (Some(prev_index), Some(timing)) =
                    (current_index.take(), current_timing.take())
                {
                    chunks.push(TsChunk {
                        index: prev_index,
                        path: chunk_path(output_dir, prev_index),
                        timing,
                    });
                    // Check the limit before opening the next chunk file; the
                    // current_* slots are already cleared, so the tail flush
                    // below is a no-op after this break.
                    if let Some(limit) = max_chunks {
                        if chunks.len() >= limit {
                            break;
                        }
                    }
                }
                let path = chunk_path(output_dir, index);
                let file = std::fs::File::create(&path)
                    .with_context(|| format!("failed to create {}", path.display()))?;
                let io = IO::from_write_stream(file);
                let mut builder = Muxer::builder();
                for (params, _) in &stream_info {
                    builder
                        .add_stream(params)
                        .map_err(|err| anyhow!(err.to_string()))?;
                }
                // Preserve the source time bases on the output streams.
                for (stream, (_, tb)) in builder.streams_mut().iter_mut().zip(stream_info.iter()) {
                    stream.set_time_base(*tb);
                }
                let format = OutputFormat::find_by_name("mpegts")
                    .ok_or_else(|| anyhow!("mpegts format not found"))?;
                let muxer = builder
                    .interleaved(true)
                    .build(io, format)
                    .map_err(|err| anyhow!(err.to_string()))?;
                current_muxer = Some(muxer);
                current_index = Some(index);
                current_timing = Some(ChunkTiming {
                    chunk_index: index,
                    // Microseconds -> 27 MHz ticks.
                    chunk_start_27mhz: ts.map(|micros| (micros as u64) * 27),
                    chunk_duration_27mhz: chunk_duration_ms * 27_000,
                    utc_start_unix: None,
                    sync_status: "pts".to_string(),
                });
            }
        }
        if let Some(muxer) = current_muxer.as_mut() {
            let packet = packet.with_time_base(ac_ffmpeg::time::TimeBase::MICROSECONDS);
            muxer.push(packet).map_err(|err| anyhow!(err.to_string()))?;
        }
    }
    // Flush the trailing chunk, if one is still open.
    if let Some(mut muxer) = current_muxer.take() {
        let _ = muxer.flush();
        let _ = muxer.close();
    }
    if let (Some(index), Some(timing)) = (current_index.take(), current_timing.take()) {
        chunks.push(TsChunk {
            index,
            path: chunk_path(output_dir, index),
            timing,
        });
    }
    Ok(TsChunkManifest {
        output_dir: output_dir.to_path_buf(),
        chunks,
    })
}
pub fn hash_ts_chunks(manifest: &TsChunkManifest) -> Result<HashedTsChunkManifest> {
let mut ordered = manifest.chunks.clone();
ordered.sort_by_key(|chunk| chunk.index);
let mut hashes = Vec::with_capacity(ordered.len());
let mut chunks = Vec::with_capacity(ordered.len());
for chunk in ordered {
let hash = hash_file_blake3(&chunk.path)?;
hashes.push(hash.clone());
chunks.push(HashedTsChunk {
index: chunk.index,
path: chunk.path.clone(),
timing: chunk.timing.clone(),
hash,
});
}
let merkle_root = merkle_root_from_hashes(&hashes)?;
Ok(HashedTsChunkManifest {
output_dir: manifest.output_dir.clone(),
chunks,
merkle_root,
})
}
/// Assemble a [`ManifestBody`] for an ordered list of chunk hashes, computing
/// the Merkle root over them.
pub fn build_manifest_body_for_chunks(
    stream_id: StreamId,
    epoch_id: impl Into<String>,
    chunk_duration_ms: u64,
    chunk_start_index: u64,
    encoder_profile_id: impl Into<String>,
    created_unix_ms: u64,
    metadata: Vec<StreamMetadata>,
    chunk_hashes: &[String],
) -> Result<ManifestBody> {
    let total_chunks = chunk_hashes.len() as u64;
    let merkle_root = merkle_root_from_hashes(chunk_hashes)?;
    Ok(ManifestBody {
        stream_id,
        epoch_id: epoch_id.into(),
        chunk_duration_ms,
        total_chunks,
        chunk_start_index,
        encoder_profile_id: encoder_profile_id.into(),
        merkle_root,
        created_unix_ms,
        metadata,
        chunk_hashes: chunk_hashes.to_vec(),
        // Variant manifests are attached elsewhere, if at all.
        variants: None,
    })
}
/// Hash the chunks in `manifest` and build the matching [`ManifestBody`].
///
/// Returns both the manifest body and the hashed chunk manifest so the caller
/// can persist either.
pub fn manifest_for_ts_chunks(
    stream_id: StreamId,
    epoch_id: impl Into<String>,
    chunk_duration_ms: u64,
    chunk_start_index: u64,
    encoder_profile_id: impl Into<String>,
    created_unix_ms: u64,
    metadata: Vec<StreamMetadata>,
    manifest: &TsChunkManifest,
) -> Result<(ManifestBody, HashedTsChunkManifest)> {
    let hashed = hash_ts_chunks(manifest)?;
    let mut chunk_hashes = Vec::with_capacity(hashed.chunks.len());
    for chunk in &hashed.chunks {
        chunk_hashes.push(chunk.hash.clone());
    }
    let body = build_manifest_body_for_chunks(
        stream_id,
        epoch_id,
        chunk_duration_ms,
        chunk_start_index,
        encoder_profile_id,
        created_unix_ms,
        metadata,
        &chunk_hashes,
    )?;
    Ok((body, hashed))
}
/// Streaming variant of [`chunk_stream_ffmpeg`]: demux `stream`, remux into
/// per-chunk MPEG-TS files split on packet PTS/DTS, and hand each finished
/// chunk to `on_chunk` as soon as it closes.
///
/// Stops after `max_chunks` emitted chunks when a limit is given; the final,
/// possibly short, chunk is emitted after the stream ends.
///
/// Fix: `chunk_duration_ms == 0` now returns an error instead of panicking
/// with a division by zero in the PTS-to-index computation.
///
/// # Errors
/// Fails when `chunk_duration_ms` is zero, on demuxer/muxer errors, on
/// chunk-file I/O errors, and propagates errors returned by `on_chunk`.
pub fn chunk_stream_ffmpeg_live<T: Read, F: FnMut(TsChunk) -> Result<()>>(
    stream: T,
    output_dir: &Path,
    chunk_duration_ms: u64,
    max_chunks: Option<usize>,
    mut on_chunk: F,
) -> Result<()> {
    if chunk_duration_ms == 0 {
        return Err(anyhow!("chunk_duration_ms must be non-zero"));
    }
    fs::create_dir_all(output_dir)
        .with_context(|| format!("failed to create {}", output_dir.display()))?;
    let io = IO::from_read_stream(stream);
    let demuxer = Demuxer::builder()
        .build(io)
        .map_err(|err| anyhow!(err.to_string()))?;
    let demuxer = demuxer
        .find_stream_info(Some(Duration::from_secs(2)))
        .map_err(|(_, err)| anyhow!(err.to_string()))?;
    // Snapshot codec parameters and time bases; each chunk's muxer is rebuilt
    // from this list.
    let stream_info = demuxer
        .streams()
        .iter()
        .map(|stream| (stream.codec_parameters(), stream.time_base()))
        .collect::<Vec<_>>();
    let mut demuxer = demuxer.into_demuxer();
    let chunk_duration_micros = chunk_duration_ms as i64 * 1000;
    let mut current_index: Option<u64> = None;
    let mut current_muxer: Option<Muxer<std::fs::File>> = None;
    let mut current_timing: Option<ChunkTiming> = None;
    let mut emitted = 0usize;
    loop {
        let Some(packet) = demuxer.take().map_err(|err| anyhow!(err.to_string()))? else {
            break;
        };
        // Prefer PTS, fall back to DTS; packets without a usable (>= 0)
        // timestamp stay in the current chunk.
        let ts = packet
            .pts()
            .as_micros()
            .or_else(|| packet.dts().as_micros());
        let chunk_index = ts
            .and_then(|micros| {
                if micros < 0 {
                    None
                } else {
                    Some((micros / chunk_duration_micros) as u64)
                }
            })
            .or(current_index);
        if let Some(index) = chunk_index {
            if current_index != Some(index) {
                // Close and emit the previous chunk before opening the next.
                if let Some(mut muxer) = current_muxer.take() {
                    muxer.flush().map_err(|err| anyhow!(err.to_string()))?;
                    let _ = muxer.close();
                }
                if let (Some(prev_index), Some(timing)) =
                    (current_index.take(), current_timing.take())
                {
                    let chunk = TsChunk {
                        index: prev_index,
                        path: chunk_path(output_dir, prev_index),
                        timing,
                    };
                    on_chunk(chunk)?;
                    emitted += 1;
                    if let Some(limit) = max_chunks {
                        if emitted >= limit {
                            return Ok(());
                        }
                    }
                }
                let path = chunk_path(output_dir, index);
                let file = std::fs::File::create(&path)
                    .with_context(|| format!("failed to create {}", path.display()))?;
                let io = IO::from_write_stream(file);
                let mut builder = Muxer::builder();
                for (params, _) in &stream_info {
                    builder
                        .add_stream(params)
                        .map_err(|err| anyhow!(err.to_string()))?;
                }
                // Preserve the source time bases on the output streams.
                for (stream, (_, tb)) in builder.streams_mut().iter_mut().zip(stream_info.iter()) {
                    stream.set_time_base(*tb);
                }
                let format = OutputFormat::find_by_name("mpegts")
                    .ok_or_else(|| anyhow!("mpegts format not found"))?;
                let muxer = builder
                    .interleaved(true)
                    .build(io, format)
                    .map_err(|err| anyhow!(err.to_string()))?;
                current_muxer = Some(muxer);
                current_index = Some(index);
                current_timing = Some(ChunkTiming {
                    chunk_index: index,
                    // Microseconds -> 27 MHz ticks.
                    chunk_start_27mhz: ts.map(|micros| (micros as u64) * 27),
                    chunk_duration_27mhz: chunk_duration_ms * 27_000,
                    utc_start_unix: None,
                    sync_status: "pts".to_string(),
                });
            }
        }
        if let Some(muxer) = current_muxer.as_mut() {
            let packet = packet.with_time_base(ac_ffmpeg::time::TimeBase::MICROSECONDS);
            muxer.push(packet).map_err(|err| anyhow!(err.to_string()))?;
        }
    }
    // Flush and emit the trailing chunk, if one is open.
    if let Some(mut muxer) = current_muxer.take() {
        let _ = muxer.flush();
        let _ = muxer.close();
    }
    if let (Some(index), Some(timing)) = (current_index.take(), current_timing.take()) {
        let chunk = TsChunk {
            index,
            path: chunk_path(output_dir, index),
            timing,
        };
        on_chunk(chunk)?;
    }
    Ok(())
}
fn segment_format_arg(format: &ChunkFormat) -> &'static str {
match format {
ChunkFormat::Fmp4 => "mp4",
ChunkFormat::MpegTs => "mpegts",
ChunkFormat::Matroska => "matroska",
}
}
/// Translate a [`DeterminismProfile`] into ffmpeg CLI arguments: the video
/// encoder selection (when one is named) followed by the profile's raw
/// encoder arguments, in order.
pub fn ffmpeg_profile_args(profile: &DeterminismProfile) -> Vec<String> {
    let encoder_flags = if profile.encoder.is_empty() {
        Vec::new()
    } else {
        vec!["-c:v".to_string(), profile.encoder.clone()]
    };
    encoder_flags
        .into_iter()
        .chain(profile.encoder_args.iter().cloned())
        .collect()
}
/// Profile tuned for bit-reproducible output: single-threaded libx264 + AAC,
/// fixed GOP (no scene-cut keyframes, no B-frames), and ffmpeg `+bitexact`
/// flags on the container and both codecs. Chunk duration is fixed at 2 s.
pub fn deterministic_h264_profile() -> DeterminismProfile {
    // Flag/value pairs flattened into the raw encoder argument list.
    let encoder_args = [
        "-c:a", "aac",
        "-b:a", "128k",
        "-ac", "2",
        "-ar", "48000",
        "-pix_fmt", "yuv420p",
        "-g", "60",
        "-keyint_min", "60",
        "-sc_threshold", "0",
        "-bf", "0",
        "-threads", "1",
        "-fflags", "+bitexact",
        "-flags:v", "+bitexact",
        "-flags:a", "+bitexact",
    ]
    .iter()
    .map(|arg| arg.to_string())
    .collect();
    DeterminismProfile {
        name: "deterministic-h264-aac".to_string(),
        description: "Single-threaded H.264 + AAC with fixed GOP and bitexact flags".to_string(),
        encoder: "libx264".to_string(),
        encoder_args,
        chunk_duration_ms: 2000,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;
    // Build a minimal 188-byte TS packet carrying only an adaptation field
    // with a PCR, split into the base (pcr/300) and extension (pcr%300)
    // fields as the parser in ec_ts expects.
    fn ts_packet_with_pcr(pid: u16, cc: u8, pcr_27mhz: u64) -> [u8; ec_ts::TS_PACKET_SIZE] {
        // Match ec_ts parser expectations.
        let base = pcr_27mhz / 300;
        let ext = pcr_27mhz % 300;
        let mut pcr = [0u8; 6];
        pcr[0] = ((base >> 25) & 0xFF) as u8;
        pcr[1] = ((base >> 17) & 0xFF) as u8;
        pcr[2] = ((base >> 9) & 0xFF) as u8;
        pcr[3] = ((base >> 1) & 0xFF) as u8;
        // Low base bit | six reserved bits (0x7E) | top extension bit.
        pcr[4] = (((base & 0x1) << 7) as u8) | 0x7E | (((ext >> 8) & 0x1) as u8);
        pcr[5] = (ext & 0xFF) as u8;
        let mut data = [0u8; ec_ts::TS_PACKET_SIZE];
        data[0] = 0x47; // TS sync byte
        data[1] = ((pid >> 8) as u8) & 0x1F;
        data[2] = (pid & 0xFF) as u8;
        data[3] = (2 << 4) | (cc & 0x0F); // adaptation only
        data[4] = 7; // adaptation field length
        data[5] = 0x10; // PCR-present flag
        data[6..12].copy_from_slice(&pcr);
        data
    }
    #[test]
    fn segment_format_mapping_is_correct() {
        assert_eq!(segment_format_arg(&ChunkFormat::Fmp4), "mp4");
        assert_eq!(segment_format_arg(&ChunkFormat::MpegTs), "mpegts");
        assert_eq!(segment_format_arg(&ChunkFormat::Matroska), "matroska");
    }
    #[test]
    fn deterministic_profile_args_are_single_threaded_and_bitexact() {
        let profile = deterministic_h264_profile();
        let args = ffmpeg_profile_args(&profile);
        // Spot-check the determinism-critical flags survive translation.
        assert!(args.iter().any(|a| a == "-threads"));
        assert!(args.iter().any(|a| a == "1"));
        assert!(args.iter().any(|a| a == "+bitexact"));
        assert!(args.iter().any(|a| a == "libx264"));
    }
    #[test]
    fn hash_file_blake3_matches_direct_hash() {
        // Process-id-scoped temp dir avoids collisions between test runs.
        let dir = std::env::temp_dir().join(format!("ec-chopper-hash-{}", std::process::id()));
        let _ = fs::create_dir_all(&dir);
        let path = dir.join("x.bin");
        fs::write(&path, b"hello").unwrap();
        let h = hash_file_blake3(&path).unwrap();
        assert_eq!(h, blake3::hash(b"hello").to_hex().to_string());
        let _ = fs::remove_file(&path);
    }
    #[test]
    fn chunk_ts_stream_emits_expected_chunk_indices() {
        let chunk_ms = 1000u64;
        let dir = std::env::temp_dir().join(format!("ec-chopper-chunks-{}", std::process::id()));
        let _ = fs::remove_dir_all(&dir);
        fs::create_dir_all(&dir).unwrap();
        // Three packets with PCRs one second apart on the 27 MHz clock, so
        // each should land in its own 1000 ms chunk.
        let mut bytes = Vec::new();
        bytes.extend_from_slice(&ts_packet_with_pcr(0x0100, 0, 0));
        bytes.extend_from_slice(&ts_packet_with_pcr(0x0100, 1, 27_000_000));
        bytes.extend_from_slice(&ts_packet_with_pcr(0x0100, 2, 54_000_000));
        let manifest = chunk_ts_stream(Cursor::new(bytes), &dir, chunk_ms, None).unwrap();
        let indices = manifest.chunks.iter().map(|c| c.index).collect::<Vec<_>>();
        assert_eq!(indices, vec![0, 1, 2]);
        // Every chunk file must hold a whole number of TS packets.
        for chunk in &manifest.chunks {
            let data = fs::read(&chunk.path).unwrap();
            assert_eq!(data.len() % ec_ts::TS_PACKET_SIZE, 0);
        }
        let _ = fs::remove_dir_all(&dir);
    }
    #[test]
    fn hashed_manifest_merkle_root_matches_core() {
        let dir = std::env::temp_dir().join(format!("ec-chopper-merkle-{}", std::process::id()));
        let _ = fs::remove_dir_all(&dir);
        fs::create_dir_all(&dir).unwrap();
        let mut bytes = Vec::new();
        bytes.extend_from_slice(&ts_packet_with_pcr(0x0100, 0, 0));
        bytes.extend_from_slice(&ts_packet_with_pcr(0x0100, 1, 27_000_000));
        let manifest = chunk_ts_stream(Cursor::new(bytes), &dir, 1000, None).unwrap();
        let hashed = hash_ts_chunks(&manifest).unwrap();
        let hashes = hashed
            .chunks
            .iter()
            .map(|c| c.hash.clone())
            .collect::<Vec<_>>();
        // The stored root must equal ec-core's computation over the same list.
        let expected = ec_core::merkle_root_from_hashes(&hashes).unwrap();
        assert_eq!(hashed.merkle_root, expected);
        let _ = fs::remove_dir_all(&dir);
    }
}

17
crates/ec-cli/Cargo.toml Normal file
View file

@ -0,0 +1,17 @@
[package]
name = "ec-cli"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
blake3.workspace = true
clap.workspace = true
ec-chopper = { path = "../ec-chopper" }
ec-core = { path = "../ec-core" }
ec-hdhomerun = { path = "../ec-hdhomerun" }
ec-linux-iptv = { path = "../ec-linux-iptv" }
serde_json.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true

379
crates/ec-cli/src/main.rs Normal file
View file

@ -0,0 +1,379 @@
use anyhow::{anyhow, Context, Result};
use blake3;
use clap::{Parser, Subcommand};
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
// Top-level CLI definition; subcommand selection is the only global input.
#[derive(Parser, Debug)]
#[command(name = "every.channel")]
#[command(about = "CLI for the every.channel mesh", long_about = None)]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}
// One variant per subcommand. NOTE: the /// doc comments below double as
// clap help text, so they are kept byte-for-byte unchanged.
#[derive(Subcommand, Debug)]
enum Commands {
    /// Discover HDHomeRun devices on the network.
    Discover,
    /// Fetch channel lineup for a device.
    Lineup {
        /// Hostname or IP (e.g. 192.168.1.10 or hdhomerun.local).
        #[arg(long)]
        host: Option<String>,
        /// Device ID (used as <deviceid>.local).
        #[arg(long)]
        device_id: Option<String>,
    },
    /// Parse lineup JSON from a file on disk.
    LineupFile { path: String },
    /// Open an HDHomeRun stream and dump MPEG-TS to a file.
    StreamDump {
        /// Hostname or IP (e.g. 192.168.1.10).
        #[arg(long)]
        host: Option<String>,
        /// Device ID (used as <deviceid>.local).
        #[arg(long)]
        device_id: Option<String>,
        /// Guide number (e.g. 8.1).
        #[arg(long)]
        channel: Option<String>,
        /// Guide name (e.g. KQED).
        #[arg(long)]
        name: Option<String>,
        /// Optional duration in seconds (if supported by the tuner URL).
        #[arg(long)]
        duration: Option<u32>,
        /// Output path for the transport stream.
        #[arg(long, default_value = "stream.ts")]
        output: PathBuf,
    },
    /// Chunk an input stream using ffmpeg.
    Chunk {
        /// Input URL or file path.
        input: String,
        /// Output directory for segments.
        output_dir: PathBuf,
    },
    /// Probe a media file using ac-ffmpeg.
    Probe {
        /// Input file path.
        input: String,
    },
    /// Analyze TS timing and chunk boundaries.
    TsSync {
        /// Input TS file.
        input: String,
        /// Chunk duration in ms.
        #[arg(long, default_value_t = 2000)]
        chunk_ms: u64,
        /// Maximum number of events to print.
        #[arg(long, default_value_t = 50)]
        max_events: usize,
    },
    /// Re-encode the same input multiple times and compare segment hashes.
    DeterminismTest {
        /// Input file path (TS or other supported by ffmpeg).
        input: String,
        /// Output directory root (runs will be placed under run-*/).
        output_dir: PathBuf,
        /// Number of runs to compare.
        #[arg(long, default_value_t = 2)]
        runs: usize,
    },
    /// Open a Linux DVB DVR device and dump MPEG-TS to a file.
    LinuxDvbDump {
        /// DVB adapter index.
        #[arg(long, default_value_t = 0)]
        adapter: u32,
        /// DVR device index.
        #[arg(long, default_value_t = 0)]
        dvr: u32,
        /// Optional tune command (repeat for each arg).
        // allow_hyphen_values lets tuner args like "-r" pass through clap.
        #[arg(long, allow_hyphen_values = true)]
        tune_cmd: Vec<String>,
        /// Optional tune wait (ms).
        #[arg(long)]
        tune_wait_ms: Option<u64>,
        /// Output path for the transport stream.
        #[arg(long, default_value = "linux-dvb.ts")]
        output: PathBuf,
    },
}
/// CLI entry point: initialize logging, parse arguments, and dispatch the
/// selected subcommand. Results are printed as pretty JSON on stdout.
fn main() -> Result<()> {
    tracing_subscriber::fmt().init();
    let cli = Cli::parse();
    match cli.command {
        // Broadcast a discovery probe and print every device that answers.
        Commands::Discover => {
            let devices = ec_hdhomerun::discover()?;
            println!("{}", serde_json::to_string_pretty(&devices)?);
        }
        Commands::Lineup { host, device_id } => {
            let device = resolve_device(host, device_id)?;
            let lineup = ec_hdhomerun::fetch_lineup(&device)?;
            println!("{}", serde_json::to_string_pretty(&lineup)?);
        }
        // Parse a lineup JSON file from disk instead of a live device.
        Commands::LineupFile { path } => {
            let bytes = fs::read(&path)?;
            let lineup = ec_hdhomerun::lineup_from_json_bytes(&bytes, None)?;
            println!("{}", serde_json::to_string_pretty(&lineup)?);
        }
        Commands::StreamDump {
            host,
            device_id,
            channel,
            name,
            duration,
            output,
        } => {
            let device = resolve_device(host, device_id)?;
            let lineup = ec_hdhomerun::fetch_lineup(&device)?;
            // --channel may hold a guide number or a name: try the number
            // match first, then fall back to a name match.
            let entry = if let Some(channel) = channel {
                ec_hdhomerun::find_lineup_entry_by_number(&lineup, &channel)
                    .or_else(|| ec_hdhomerun::find_lineup_entry_by_name(&lineup, &channel))
                    .ok_or_else(|| anyhow!("channel not found: {channel}"))?
            } else if let Some(name) = name {
                ec_hdhomerun::find_lineup_entry_by_name(&lineup, &name)
                    .ok_or_else(|| anyhow!("channel not found: {name}"))?
            } else {
                return Err(anyhow!("--channel or --name required"));
            };
            let mut stream = ec_hdhomerun::open_stream_entry(entry, duration)?;
            let mut file = File::create(&output)
                .with_context(|| format!("failed to create {}", output.display()))?;
            // Copy the transport stream to disk until the source closes it.
            let mut buf = [0u8; 8192];
            loop {
                let read = stream.read(&mut buf)?;
                if read == 0 {
                    break;
                }
                file.write_all(&buf[..read])?;
            }
        }
        Commands::Chunk { input, output_dir } => {
            let profile = ec_chopper::deterministic_h264_profile();
            let config = ec_chopper::ChunkerConfig {
                output_dir,
                segment_duration_ms: profile.chunk_duration_ms,
                segment_template: ec_chopper::ChunkerConfig::default_segment_template(),
                format: ec_chopper::ChunkFormat::Fmp4,
                profile,
            };
            // Treat http(s) inputs as URLs; anything else as a local file.
            let input = if input.starts_with("http://") || input.starts_with("https://") {
                ec_chopper::ChunkerInput::Url(input)
            } else {
                ec_chopper::ChunkerInput::File(PathBuf::from(input))
            };
            let segmenter = ec_chopper::FfmpegCliSegmenter::default();
            let mut process = segmenter.spawn(input, &config)?;
            let status = process.child.wait()?;
            if !status.success() {
                return Err(anyhow!("ffmpeg exited with status {status}"));
            }
            let manifest = ec_chopper::collect_segments(&process.output_dir)?;
            println!("{}", serde_json::to_string_pretty(&manifest)?);
        }
        Commands::Probe { input } => {
            let file = File::open(&input).with_context(|| format!("failed to open {}", input))?;
            let probes = ec_chopper::probe_read_stream(file)?;
            println!("{}", serde_json::to_string_pretty(&probes)?);
        }
        Commands::TsSync {
            input,
            chunk_ms,
            max_events,
        } => {
            let file = File::open(&input).with_context(|| format!("failed to open {}", input))?;
            let events = ec_chopper::analyze_ts_time(file, chunk_ms, max_events)?;
            println!("{}", serde_json::to_string_pretty(&events)?);
        }
        Commands::DeterminismTest {
            input,
            output_dir,
            runs,
        } => {
            if runs < 1 {
                return Err(anyhow!("runs must be >= 1"));
            }
            let profile = ec_chopper::deterministic_h264_profile();
            let format = ec_chopper::ChunkFormat::Fmp4;
            let template = ec_chopper::ChunkerConfig::default_segment_template();
            // The first run establishes the baseline; later runs must match
            // hash-for-hash or the encode is not deterministic.
            let mut baseline: Option<Vec<String>> = None;
            for run in 0..runs {
                let run_dir = output_dir.join(format!("run-{}", run + 1));
                // Best-effort cleanup of a previous run's directory.
                let _ = fs::remove_dir_all(&run_dir);
                let config = ec_chopper::ChunkerConfig {
                    output_dir: run_dir.clone(),
                    segment_duration_ms: profile.chunk_duration_ms,
                    segment_template: template.clone(),
                    format: format.clone(),
                    profile: profile.clone(),
                };
                let input_spec = if input.starts_with("http://") || input.starts_with("https://") {
                    ec_chopper::ChunkerInput::Url(input.clone())
                } else {
                    ec_chopper::ChunkerInput::File(PathBuf::from(&input))
                };
                let segmenter = ec_chopper::FfmpegCliSegmenter::default();
                let mut process = segmenter.spawn(input_spec, &config)?;
                let status = process.child.wait()?;
                if !status.success() {
                    return Err(anyhow!("ffmpeg exited with status {status}"));
                }
                let hashes = hash_segments(&process.output_dir)?;
                match baseline.as_ref() {
                    None => {
                        baseline = Some(hashes);
                        println!(
                            "run {}: baseline ({}) segments",
                            run + 1,
                            baseline.as_ref().unwrap().len()
                        );
                    }
                    Some(base) => {
                        let mismatches = compare_hashes(base, &hashes);
                        if mismatches > 0 {
                            return Err(anyhow!(
                                "determinism mismatch on run {} ({} mismatches)",
                                run + 1,
                                mismatches
                            ));
                        }
                        println!("run {}: matched baseline", run + 1);
                    }
                }
            }
        }
        Commands::LinuxDvbDump {
            adapter,
            dvr,
            tune_cmd,
            tune_wait_ms,
            output,
        } => {
            let config = ec_linux_iptv::LinuxDvbConfig {
                adapter,
                frontend: 0,
                dvr,
                // An empty repeated flag means "do not tune, just read".
                tune_command: if tune_cmd.is_empty() {
                    None
                } else {
                    Some(tune_cmd)
                },
                tune_timeout_ms: tune_wait_ms,
            };
            let mut stream = ec_linux_iptv::open_stream(&config)?;
            let mut file = File::create(&output)
                .with_context(|| format!("failed to create {}", output.display()))?;
            let mut buf = [0u8; 8192];
            loop {
                let read = stream.read(&mut buf)?;
                if read == 0 {
                    break;
                }
                file.write_all(&buf[..read])?;
            }
        }
    }
    Ok(())
}
fn hash_segments(output_dir: &PathBuf) -> Result<Vec<String>> {
let manifest = ec_chopper::collect_segments(output_dir)?;
let mut hashes = Vec::new();
for segment in manifest.segments {
let bytes = fs::read(&segment.path)
.with_context(|| format!("failed to read {}", segment.path.display()))?;
let hash = blake3::hash(&bytes);
hashes.push(hash.to_hex().to_string());
}
Ok(hashes)
}
/// Count positions where the two hash lists disagree, treating a missing
/// entry on either side (length mismatch) as a mismatch at that position.
fn compare_hashes(base: &[String], candidate: &[String]) -> usize {
    let longest = base.len().max(candidate.len());
    (0..longest)
        .filter(|&idx| base.get(idx) != candidate.get(idx))
        .count()
}
/// Pick an HDHomeRun device: an explicit host wins, then `<device_id>.local`,
/// otherwise network discovery (taking the last device found).
fn resolve_device(
    host: Option<String>,
    device_id: Option<String>,
) -> Result<ec_hdhomerun::HdhomerunDevice> {
    match (host, device_id) {
        (Some(host), _) => ec_hdhomerun::discover_from_host(&host),
        (None, Some(device_id)) => {
            let mdns_host = format!("{device_id}.local");
            ec_hdhomerun::discover_from_host(&mdns_host)
        }
        (None, None) => {
            let mut devices = ec_hdhomerun::discover()?;
            devices
                .pop()
                .ok_or_else(|| anyhow!("no HDHomeRun devices found"))
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// Smoke-test argument parsing for the main subcommands.
    #[test]
    fn clap_parses_common_subcommands() {
        let cli = Cli::try_parse_from(["every.channel", "discover"]).unwrap();
        // `matches!` returns a bool; without assert! these checks were no-ops
        // and the test could never fail on a wrong variant.
        assert!(matches!(cli.command, Commands::Discover));
        let cli = Cli::try_parse_from([
            "every.channel",
            "ts-sync",
            "input.ts",
            "--chunk-ms",
            "1000",
            "--max-events",
            "5",
        ])
        .unwrap();
        assert!(matches!(cli.command, Commands::TsSync { .. }));
        let cli = Cli::try_parse_from([
            "every.channel",
            "linux-dvb-dump",
            "--adapter",
            "0",
            "--dvr",
            "0",
            "--tune-cmd",
            "dvbv5-zap",
            "--tune-cmd",
            "-r",
            "--tune-cmd",
            "KQED",
        ])
        .unwrap();
        assert!(matches!(cli.command, Commands::LinuxDvbDump { .. }));
    }
}

10
crates/ec-core/Cargo.toml Normal file
View file

@ -0,0 +1,10 @@
[package]
name = "ec-core"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
serde.workspace = true
blake3.workspace = true
serde_json.workspace = true

463
crates/ec-core/src/lib.rs Normal file
View file

@ -0,0 +1,463 @@
//! Core types shared across every.channel.
use serde::{Deserialize, Serialize};
use std::fmt;
/// Stable identifier for a channel within a device lineup.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ChannelId(pub String);
/// Stable identifier for a tuner/capture device.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct DeviceId(pub String);
/// Stable identifier for a published stream (see [`StreamKey::to_stream_id`]).
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StreamId(pub String);
/// Human-facing description of a stream, plus free-form metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamDescriptor {
    pub id: StreamId,
    pub title: String,
    /// Guide/channel number when known (e.g. "9.1").
    pub number: Option<String>,
    /// Where the stream came from (e.g. device kind or ingest path).
    pub source: String,
    pub metadata: Vec<StreamMetadata>,
}
/// Free-form key/value metadata attached to streams and manifests.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamMetadata {
    pub key: String,
    pub value: String,
}
/// Broadcast-derived identity (transport stream / program / callsign / region).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BroadcastId {
    /// Broadcast standard label (e.g. ATSC, DVB).
    pub standard: String,
    pub transport_stream_id: Option<u16>,
    pub program_number: Option<u16>,
    pub callsign: Option<String>,
    pub region: Option<String>,
    pub frequency: Option<String>,
}
/// Capture-source-derived identity, used when no broadcast identity exists.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SourceId {
    pub kind: String,
    pub device_id: Option<String>,
    pub channel: Option<String>,
}
/// Structured inputs from which a canonical [`StreamId`] is derived.
///
/// Broadcast identity takes precedence over source identity when both are set.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamKey {
    /// Id format version, rendered as the `v{N}` path segment.
    pub version: u16,
    pub broadcast: Option<BroadcastId>,
    pub source: Option<SourceId>,
    pub profile: Option<String>,
    pub variant: Option<String>,
}
impl StreamKey {
    /// Render this key as a stable, slash-separated [`StreamId`].
    ///
    /// Layout: `ec/stream/v{N}` followed by broadcast-derived segments when a
    /// broadcast identity is present, otherwise source-derived segments,
    /// otherwise the literal `unknown`; optional profile and variant segments
    /// come last. All free-form text passes through `sanitize` so the id
    /// stays path/URL friendly.
    pub fn to_stream_id(&self) -> StreamId {
        let mut segments = vec![
            "ec".to_string(),
            "stream".to_string(),
            format!("v{}", self.version),
        ];
        match (&self.broadcast, &self.source) {
            // Broadcast identity wins over source identity.
            (Some(broadcast), _) => {
                segments.push("broadcast".to_string());
                segments.push(sanitize(&broadcast.standard));
                if let Some(tsid) = broadcast.transport_stream_id {
                    segments.push(format!("tsid-{tsid}"));
                }
                if let Some(program) = broadcast.program_number {
                    segments.push(format!("program-{program}"));
                }
                // Sanitized, labelled optional segments in fixed order.
                for (label, value) in [
                    ("callsign", &broadcast.callsign),
                    ("region", &broadcast.region),
                    ("freq", &broadcast.frequency),
                ] {
                    if let Some(value) = value {
                        segments.push(format!("{label}-{}", sanitize(value)));
                    }
                }
            }
            (None, Some(source)) => {
                segments.push("source".to_string());
                segments.push(sanitize(&source.kind));
                if let Some(device) = &source.device_id {
                    segments.push(format!("device-{}", sanitize(device)));
                }
                if let Some(channel) = &source.channel {
                    segments.push(format!("channel-{}", sanitize(channel)));
                }
            }
            (None, None) => segments.push("unknown".to_string()),
        }
        if let Some(profile) = &self.profile {
            segments.push(format!("profile-{}", sanitize(profile)));
        }
        if let Some(variant) = &self.variant {
            segments.push(format!("variant-{}", sanitize(variant)));
        }
        StreamId(segments.join("/"))
    }
}
/// A tunable channel as reported by a device lineup.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Channel {
    pub id: ChannelId,
    pub name: String,
    /// Guide number when known (e.g. "9.1").
    pub number: Option<String>,
    /// MPEG program number within the transport stream, when known.
    pub program_id: Option<u16>,
    pub metadata: Vec<ChannelMetadata>,
}
/// Normalize free-form text into a lowercase id-safe token.
///
/// Lowercase ASCII letters, digits, `-` and `_` pass through; uppercase ASCII
/// is lowercased; every other character becomes `_`.
fn sanitize(value: &str) -> String {
    let mut out = String::with_capacity(value.len());
    for c in value.chars() {
        if c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-' || c == '_' {
            out.push(c);
        } else if c.is_ascii_uppercase() {
            out.push(c.to_ascii_lowercase());
        } else {
            out.push('_');
        }
    }
    out
}
/// Structured channel attributes, with an escape hatch for extras.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ChannelMetadata {
    Callsign(String),
    Network(String),
    Region(String),
    Frequency(String),
    /// Arbitrary key/value pair not covered by the named variants.
    Extra(String, String),
}
/// A hash of packet data, tagged with its algorithm name.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PacketDigest {
    pub algorithm: String,
    pub hex: String,
}
/// Encoder settings that must be identical for byte-reproducible output.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeterminismProfile {
    pub name: String,
    pub description: String,
    pub encoder: String,
    pub encoder_args: Vec<String>,
    pub chunk_duration_ms: u64,
}
/// Identity and capabilities of a node in the network.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeDescriptor {
    pub node_id: String,
    pub human_name: String,
    pub location_hint: Option<String>,
    pub capabilities: Vec<String>,
}
/// How a stream's payloads are encrypted (algorithm, key id, nonce scheme).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamEncryptionInfo {
    pub alg: String,
    pub key_id: String,
    pub nonce_scheme: String,
}
/// Where to fetch a stream over Media-over-QUIC.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MoqStreamDescriptor {
    pub endpoint: String,
    pub broadcast_name: String,
    pub track_name: String,
    pub encryption: Option<StreamEncryptionInfo>,
}
/// One catalog row: a stream plus its optional transport and manifest info.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamCatalogEntry {
    pub stream: StreamDescriptor,
    pub moq: Option<MoqStreamDescriptor>,
    pub manifest: Option<ManifestSummary>,
    pub updated_unix_ms: u64,
}
/// The full set of streams a node advertises.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamCatalog {
    pub entries: Vec<StreamCatalogEntry>,
}
/// Compact view of a [`Manifest`] suitable for catalogs (no chunk hashes).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManifestSummary {
    pub manifest_id: String,
    pub merkle_root: String,
    pub epoch_id: String,
    pub total_chunks: u64,
    pub chunk_start_index: u64,
    pub encoder_profile_id: String,
    /// Signer ids extracted from the manifest's signatures.
    pub signed_by: Vec<String>,
}
/// Globally unique address of one chunk within a stream epoch.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChunkId {
    pub stream_id: StreamId,
    pub epoch_id: String,
    pub chunk_index: u64,
    pub chunk_hash: String,
}
/// A per-variant chunk list inside a manifest (e.g. an alternate rendition).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManifestVariant {
    pub variant_id: String,
    pub stream_id: StreamId,
    pub chunk_start_index: u64,
    pub total_chunks: u64,
    pub merkle_root: String,
    pub chunk_hashes: Vec<String>,
    #[serde(default)]
    pub metadata: Vec<StreamMetadata>,
}
/// The signed portion of a manifest; its serialized form is content-addressed
/// by [`ManifestBody::manifest_id`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManifestBody {
    pub stream_id: StreamId,
    pub epoch_id: String,
    pub chunk_duration_ms: u64,
    pub total_chunks: u64,
    pub chunk_start_index: u64,
    pub encoder_profile_id: String,
    /// Merkle root over `chunk_hashes` (see `merkle_root_from_hashes`).
    pub merkle_root: String,
    pub created_unix_ms: u64,
    pub metadata: Vec<StreamMetadata>,
    pub chunk_hashes: Vec<String>,
    #[serde(default)]
    pub variants: Option<Vec<ManifestVariant>>,
}
/// One detached signature over a manifest id.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManifestSignature {
    pub signer_id: String,
    pub alg: String,
    pub signature: String,
}
/// A manifest body plus its content id and any signatures over that id.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Manifest {
    pub body: ManifestBody,
    pub manifest_id: String,
    pub signatures: Vec<ManifestSignature>,
}
impl Manifest {
    /// Produce the compact catalog view of this manifest (drops chunk hashes,
    /// keeps identifying fields and the list of signer ids).
    pub fn summary(&self) -> ManifestSummary {
        let signed_by = self
            .signatures
            .iter()
            .map(|sig| sig.signer_id.clone())
            .collect();
        ManifestSummary {
            manifest_id: self.manifest_id.clone(),
            merkle_root: self.body.merkle_root.clone(),
            epoch_id: self.body.epoch_id.clone(),
            total_chunks: self.body.total_chunks,
            chunk_start_index: self.body.chunk_start_index,
            encoder_profile_id: self.body.encoder_profile_id.clone(),
            signed_by,
        }
    }
}
/// Errors produced while building manifests and Merkle structures.
#[derive(Debug, Clone)]
pub enum ManifestError {
    /// No chunk hashes were provided.
    Empty,
    /// A chunk hash was not valid hex for this hash algorithm.
    InvalidHash(String),
}
impl fmt::Display for ManifestError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            ManifestError::Empty => "no chunk hashes supplied".to_string(),
            ManifestError::InvalidHash(value) => format!("invalid chunk hash: {value}"),
        };
        f.write_str(&text)
    }
}
impl std::error::Error for ManifestError {}
impl ManifestBody {
    /// Content-address this body: BLAKE3 over its JSON encoding.
    ///
    /// Any field change (including `created_unix_ms`) yields a new id.
    /// NOTE(review): assumes serde_json emits fields in a stable order —
    /// true for derived struct serialization, but worth confirming if the
    /// body ever becomes a map type.
    pub fn manifest_id(&self) -> Result<String, serde_json::Error> {
        let bytes = serde_json::to_vec(self)?;
        Ok(blake3::hash(&bytes).to_hex().to_string())
    }
}
/// Compute the BLAKE3 Merkle root over a list of hex leaf hashes.
///
/// Odd levels are padded by duplicating their final node; each parent hashes
/// the 64-byte concatenation of its two children. A single leaf is its own
/// root. Errors on an empty list or any non-hex leaf.
pub fn merkle_root_from_hashes(hashes: &[String]) -> Result<String, ManifestError> {
    if hashes.is_empty() {
        return Err(ManifestError::Empty);
    }
    // Parse every leaf up front so a bad hash fails before any tree work.
    let mut level = hashes
        .iter()
        .map(|hex| {
            blake3::Hash::from_hex(hex.as_bytes())
                .map_err(|_| ManifestError::InvalidHash(hex.clone()))
        })
        .collect::<Result<Vec<_>, _>>()?;
    while level.len() > 1 {
        if level.len() % 2 == 1 {
            // Duplicate the trailing node so every parent has two children.
            if let Some(last) = level.last().cloned() {
                level.push(last);
            }
        }
        level = level
            .chunks(2)
            .map(|pair| {
                let mut joined = [0u8; 64];
                joined[..32].copy_from_slice(pair[0].as_bytes());
                joined[32..].copy_from_slice(pair[1].as_bytes());
                blake3::hash(&joined)
            })
            .collect();
    }
    Ok(level[0].to_hex().to_string())
}
/// Build the Merkle inclusion proof (sibling path) for leaf `index`.
///
/// Mirrors the tree construction in `merkle_root_from_hashes` exactly: odd
/// levels duplicate their final node before pairing. Returns one sibling hex
/// hash per level, ordered leaf-to-root, for use with `verify_merkle_proof`.
///
/// Errors on an empty list, an out-of-range index, or any non-hex leaf.
pub fn merkle_proof_for_index(
    hashes: &[String],
    index: usize,
) -> Result<Vec<String>, ManifestError> {
    if hashes.is_empty() {
        return Err(ManifestError::Empty);
    }
    if index >= hashes.len() {
        return Err(ManifestError::InvalidHash(format!(
            "index {index} out of bounds"
        )));
    }
    let mut nodes: Vec<blake3::Hash> = Vec::with_capacity(hashes.len());
    for hash in hashes {
        let parsed = blake3::Hash::from_hex(hash.as_bytes())
            .map_err(|_| ManifestError::InvalidHash(hash.clone()))?;
        nodes.push(parsed);
    }
    let mut proof = Vec::new();
    // `pos` tracks the leaf's position as it moves up the tree.
    let mut pos = index;
    while nodes.len() > 1 {
        if nodes.len() % 2 == 1 {
            // Pad odd levels by repeating the last node (same as the root builder).
            if let Some(last) = nodes.last().cloned() {
                nodes.push(last);
            }
        }
        // The sibling is the other element of this node's pair.
        let sibling_index = if pos % 2 == 0 { pos + 1 } else { pos - 1 };
        let sibling = nodes
            .get(sibling_index)
            .ok_or_else(|| ManifestError::InvalidHash("missing sibling".to_string()))?;
        proof.push(sibling.to_hex().to_string());
        let mut parents = Vec::with_capacity(nodes.len() / 2);
        for pair in nodes.chunks(2) {
            let left = pair[0].as_bytes();
            let right = pair[1].as_bytes();
            let mut merged = [0u8; 64];
            merged[..32].copy_from_slice(left);
            merged[32..].copy_from_slice(right);
            parents.push(blake3::hash(&merged));
        }
        nodes = parents;
        // Parent of position p lives at p/2 on the next level.
        pos /= 2;
    }
    Ok(proof)
}
/// Check a Merkle inclusion proof produced by `merkle_proof_for_index`.
///
/// Re-hashes from `leaf_hash` up through `branch` (choosing left/right by the
/// parity of `index` at each level) and compares the result to
/// `expected_root`. Any malformed hex anywhere yields `false`.
pub fn verify_merkle_proof(
    leaf_hash: &str,
    mut index: usize,
    branch: &[String],
    expected_root: &str,
) -> bool {
    let mut current = match blake3::Hash::from_hex(leaf_hash.as_bytes()) {
        Ok(hash) => hash,
        Err(_) => return false,
    };
    for sibling_hex in branch {
        let Ok(sibling) = blake3::Hash::from_hex(sibling_hex.as_bytes()) else {
            return false;
        };
        // Even index => we are the left child; odd => the right child.
        let mut merged = [0u8; 64];
        if index % 2 == 0 {
            merged[..32].copy_from_slice(current.as_bytes());
            merged[32..].copy_from_slice(sibling.as_bytes());
        } else {
            merged[..32].copy_from_slice(sibling.as_bytes());
            merged[32..].copy_from_slice(current.as_bytes());
        }
        current = blake3::hash(&merged);
        index /= 2;
    }
    current.to_hex().to_string() == expected_root
}
#[cfg(test)]
mod tests {
    use super::*;
    // Changing any body field (here the timestamp) must change the content id.
    #[test]
    fn manifest_id_changes_with_body() {
        let body = ManifestBody {
            stream_id: StreamId("s".to_string()),
            epoch_id: "e".to_string(),
            chunk_duration_ms: 2000,
            total_chunks: 1,
            chunk_start_index: 0,
            encoder_profile_id: "p".to_string(),
            merkle_root: "00".repeat(32),
            created_unix_ms: 1,
            metadata: Vec::new(),
            chunk_hashes: vec!["11".repeat(32)],
            variants: None,
        };
        let id1 = body.manifest_id().unwrap();
        let mut body2 = body.clone();
        body2.created_unix_ms = 2;
        let id2 = body2.manifest_id().unwrap();
        assert_ne!(id1, id2);
    }
    // A one-leaf tree's root is the leaf itself (no parent hashing happens).
    #[test]
    fn merkle_root_single_is_leaf() {
        let leaf = blake3::hash(b"leaf").to_hex().to_string();
        let root = merkle_root_from_hashes(&[leaf.clone()]).unwrap();
        assert_eq!(root, leaf);
    }
    #[test]
    fn merkle_root_rejects_invalid_hash() {
        let err = merkle_root_from_hashes(&["not-hex".to_string()]).unwrap_err();
        assert!(matches!(err, ManifestError::InvalidHash(_)));
    }
    // Exercises both even and odd level widths (sizes 1..=9) so the
    // duplicate-last-node padding path is covered.
    #[test]
    fn merkle_proof_roundtrip_small_sets() {
        for size in 1..=9usize {
            let leaves = (0..size)
                .map(|i| blake3::hash(&[i as u8]).to_hex().to_string())
                .collect::<Vec<_>>();
            let root = merkle_root_from_hashes(&leaves).unwrap();
            for idx in 0..size {
                let proof = merkle_proof_for_index(&leaves, idx).unwrap();
                assert!(
                    verify_merkle_proof(&leaves[idx], idx, &proof, &root),
                    "size {size} idx {idx} failed"
                );
            }
        }
    }
    // Corrupting any branch element must make verification fail.
    #[test]
    fn merkle_proof_detects_tampering() {
        let leaves = (0..4usize)
            .map(|i| blake3::hash(&[i as u8]).to_hex().to_string())
            .collect::<Vec<_>>();
        let root = merkle_root_from_hashes(&leaves).unwrap();
        let mut proof = merkle_proof_for_index(&leaves, 2).unwrap();
        proof[0] = blake3::hash(b"evil").to_hex().to_string();
        assert!(!verify_merkle_proof(&leaves[2], 2, &proof, &root));
    }
}

View file

@ -0,0 +1,12 @@
[package]
name = "ec-crypto"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
blake3 = "1"
chacha20poly1305 = "0.10"
ed25519-dalek = { version = "2", features = ["pkcs8"] }
hex = "0.4"
ec-core = { path = "../ec-core" }

227
crates/ec-crypto/src/lib.rs Normal file
View file

@ -0,0 +1,227 @@
//! Cryptographic helpers for every.channel.
use chacha20poly1305::{aead::Aead, KeyInit, XChaCha20Poly1305, XNonce};
use ec_core::ManifestSignature;
use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey};
use std::env;
use std::fs;
/// Signature algorithm label recorded in manifest signatures.
pub const MANIFEST_SIG_ALG: &str = "ed25519";
/// AEAD algorithm label recorded alongside encrypted stream payloads.
pub const ENCRYPTION_ALG: &str = "xchacha20poly1305";
/// Derive a stream encryption key from a stream id and optional network secret.
///
/// Deterministic: identical inputs always produce the same key. The key
/// material is `[secret, 0x00]` (when a secret is supplied) followed by the
/// stream id bytes, fed to BLAKE3's key-derivation mode under a fixed context.
pub fn derive_stream_key(stream_id: &str, network_secret: Option<&[u8]>) -> [u8; 32] {
    let mut material = match network_secret {
        Some(secret) => {
            let mut buf = Vec::with_capacity(secret.len() + 1 + stream_id.len());
            buf.extend_from_slice(secret);
            // Zero byte separates the secret from the stream id.
            buf.push(0);
            buf
        }
        None => Vec::with_capacity(stream_id.len()),
    };
    material.extend_from_slice(stream_id.as_bytes());
    blake3::derive_key("every.channel stream key v1", &material)
}
/// Derive a deterministic 24-byte XChaCha20 nonce for a stream chunk.
///
/// Hashes a fixed domain tag, the stream id, and the big-endian chunk index,
/// then truncates to 24 bytes — each (stream, chunk) pair gets a unique nonce.
pub fn derive_stream_nonce(stream_id: &str, chunk_index: u64) -> [u8; 24] {
    let digest = blake3::Hasher::new()
        .update(b"every.channel stream nonce v1")
        .update(stream_id.as_bytes())
        .update(&chunk_index.to_be_bytes())
        .finalize();
    let mut nonce = [0u8; 24];
    nonce.copy_from_slice(&digest.as_bytes()[..24]);
    nonce
}
/// An AEAD-encrypted chunk plus the nonce and algorithm used to produce it.
#[derive(Debug, Clone)]
pub struct EncryptedPayload {
    /// Ciphertext including the Poly1305 authentication tag.
    pub ciphertext: Vec<u8>,
    pub nonce: [u8; 24],
    pub alg: &'static str,
}
/// Encrypt one chunk of stream data with XChaCha20-Poly1305.
///
/// Key and nonce are derived deterministically from `stream_id` (plus the
/// optional network secret) and `chunk_index`, so no key/nonce state needs to
/// be persisted; re-encrypting the same inputs reproduces the same output.
pub fn encrypt_stream_data(
    stream_id: &str,
    chunk_index: u64,
    plaintext: &[u8],
    network_secret: Option<&[u8]>,
) -> EncryptedPayload {
    let key_bytes = derive_stream_key(stream_id, network_secret);
    // Key is always 32 bytes from derive_stream_key, so this cannot fail.
    let cipher = XChaCha20Poly1305::new_from_slice(&key_bytes).expect("key size");
    let nonce_bytes = derive_stream_nonce(stream_id, chunk_index);
    let nonce = XNonce::from_slice(&nonce_bytes);
    // NOTE(review): aead encrypt errors are not data-dependent here, so a
    // failure would indicate a bug — hence the expect.
    let ciphertext = cipher
        .encrypt(nonce, plaintext)
        .expect("encryption failure");
    EncryptedPayload {
        ciphertext,
        nonce: nonce_bytes,
        alg: ENCRYPTION_ALG,
    }
}
/// Decrypt one chunk previously produced by [`encrypt_stream_data`].
///
/// Returns `None` when the key, nonce inputs, or ciphertext do not match
/// (i.e. authentication fails) rather than surfacing the AEAD error.
pub fn decrypt_stream_data(
    stream_id: &str,
    chunk_index: u64,
    ciphertext: &[u8],
    network_secret: Option<&[u8]>,
) -> Option<Vec<u8>> {
    let key_bytes = derive_stream_key(stream_id, network_secret);
    let cipher = XChaCha20Poly1305::new_from_slice(&key_bytes).expect("key size");
    let nonce_bytes = derive_stream_nonce(stream_id, chunk_index);
    let nonce = XNonce::from_slice(&nonce_bytes);
    cipher.decrypt(nonce, ciphertext).ok()
}
/// Ed25519 keypair used to sign manifest ids.
#[derive(Debug, Clone)]
pub struct ManifestKeypair {
    pub signing_key: SigningKey,
    pub verifying_key: VerifyingKey,
}
/// Load the manifest signing key from `EVERY_CHANNEL_MANIFEST_SIGNING_KEY`.
///
/// The variable may hold hex directly, or a filesystem path whose contents
/// are hex. 32 decoded bytes are taken as the ed25519 seed; 64 bytes are
/// treated as seed followed by public key and only the first 32 are used.
/// Returns `Ok(None)` when the variable is unset.
pub fn load_manifest_keypair_from_env() -> Result<Option<ManifestKeypair>, String> {
    let value = match env::var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY") {
        Ok(value) => value,
        Err(env::VarError::NotPresent) => return Ok(None),
        Err(err) => return Err(err.to_string()),
    };
    let trimmed = value.trim();
    // Path-or-hex disambiguation: if the value names an existing file, read
    // the hex from that file instead of parsing the value itself.
    let key_bytes = if std::path::Path::new(trimmed).exists() {
        let text = fs::read_to_string(trimmed).map_err(|err| err.to_string())?;
        hex::decode(text.trim()).map_err(|err| err.to_string())?
    } else {
        hex::decode(trimmed).map_err(|err| err.to_string())?
    };
    let bytes = if key_bytes.len() == 32 {
        key_bytes
    } else if key_bytes.len() == 64 {
        // 64-byte form: seed || public key; the seed alone reconstructs both.
        key_bytes[..32].to_vec()
    } else {
        return Err("manifest signing key must be 32 or 64 hex bytes".to_string());
    };
    let mut secret = [0u8; 32];
    secret.copy_from_slice(&bytes[..32]);
    let signing_key = SigningKey::from_bytes(&secret);
    let verifying_key = signing_key.verifying_key();
    Ok(Some(ManifestKeypair {
        signing_key,
        verifying_key,
    }))
}
/// Render a verifying key as the canonical signer id (`ed25519:<hex pubkey>`).
pub fn signer_id_from_key(key: &VerifyingKey) -> String {
    format!("ed25519:{}", hex::encode(key.to_bytes()))
}
/// Sign a manifest id, producing a self-describing [`ManifestSignature`].
pub fn sign_manifest_id(manifest_id: &str, keypair: &ManifestKeypair) -> ManifestSignature {
    let signature: Signature = keypair.signing_key.sign(manifest_id.as_bytes());
    ManifestSignature {
        signer_id: signer_id_from_key(&keypair.verifying_key),
        alg: MANIFEST_SIG_ALG.to_string(),
        signature: hex::encode(signature.to_bytes()),
    }
}
/// Verify a manifest signature against the public key embedded in its
/// signer id. Returns `false` for any malformed input (wrong algorithm,
/// bad hex, wrong key length, invalid signature bytes) rather than erroring.
pub fn verify_manifest_signature(manifest_id: &str, sig: &ManifestSignature) -> bool {
    if sig.alg != MANIFEST_SIG_ALG {
        return false;
    }
    // Accept signer ids with or without the "ed25519:" prefix.
    let signer_id = sig
        .signer_id
        .strip_prefix("ed25519:")
        .unwrap_or(&sig.signer_id);
    let Ok(pk_bytes) = hex::decode(signer_id) else {
        return false;
    };
    if pk_bytes.len() != 32 {
        return false;
    }
    let mut pk = [0u8; 32];
    pk.copy_from_slice(&pk_bytes);
    let Ok(verifying_key) = VerifyingKey::from_bytes(&pk) else {
        return false;
    };
    let Ok(sig_bytes) = hex::decode(&sig.signature) else {
        return false;
    };
    let Ok(signature) = Signature::from_slice(&sig_bytes) else {
        return false;
    };
    verifying_key
        .verify(manifest_id.as_bytes(), &signature)
        .is_ok()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Same inputs must yield the same key; different stream id or secret must not.
    #[test]
    fn stream_key_is_deterministic_and_secret_sensitive() {
        let k1 = derive_stream_key("s1", None);
        let k2 = derive_stream_key("s1", None);
        assert_eq!(k1, k2);
        let k3 = derive_stream_key("s2", None);
        assert_ne!(k1, k3);
        let secret = [7u8; 32];
        let ks1 = derive_stream_key("s1", Some(&secret));
        assert_ne!(k1, ks1);
        let ks2 = derive_stream_key("s1", Some(&secret));
        assert_eq!(ks1, ks2);
    }
    #[test]
    fn nonce_changes_per_chunk_index() {
        let n1 = derive_stream_nonce("s", 1);
        let n2 = derive_stream_nonce("s", 2);
        assert_ne!(n1, n2);
    }
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let plaintext = b"hello world";
        let enc = encrypt_stream_data("s", 42, plaintext, None);
        assert_ne!(enc.ciphertext, plaintext);
        let out = decrypt_stream_data("s", 42, &enc.ciphertext, None).unwrap();
        assert_eq!(out, plaintext);
    }
    // A different chunk index derives a different nonce, so auth must fail.
    #[test]
    fn decrypt_fails_with_wrong_index() {
        let plaintext = b"hello world";
        let enc = encrypt_stream_data("s", 42, plaintext, None);
        assert!(decrypt_stream_data("s", 43, &enc.ciphertext, None).is_none());
    }
    #[test]
    fn manifest_sign_verify_roundtrip() {
        let secret = [1u8; 32];
        let signing_key = SigningKey::from_bytes(&secret);
        let verifying_key = signing_key.verifying_key();
        let keypair = ManifestKeypair {
            signing_key,
            verifying_key,
        };
        let sig = sign_manifest_id("m", &keypair);
        assert!(verify_manifest_signature("m", &sig));
        assert!(!verify_manifest_signature("evil", &sig));
    }
    // NOTE(review): this test mutates process-wide env state; Rust runs tests
    // in parallel by default, so any other test reading this variable could
    // race with it — consider serializing if such a test is ever added.
    #[test]
    fn load_keypair_from_env_hex() {
        let prev = env::var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY").ok();
        env::set_var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", "00".repeat(32));
        let loaded = load_manifest_keypair_from_env().unwrap().unwrap();
        let id = signer_id_from_key(&loaded.verifying_key);
        assert!(id.starts_with("ed25519:"));
        match prev {
            Some(value) => env::set_var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", value),
            None => env::remove_var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY"),
        }
    }
}

View file

@ -0,0 +1,16 @@
[package]
name = "ec-direct"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
base64 = "0.22"
just-webrtc = { version = "0.2", default-features = true }
serde.workspace = true
serde_json.workspace = true
[dev-dependencies]
bytes = "1"
tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] }

View file

@ -0,0 +1,94 @@
use anyhow::{anyhow, Context, Result};
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use base64::Engine;
use just_webrtc::types::{ICECandidate, SessionDescription};
use serde::{Deserialize, Serialize};
/// Version-1 out-of-band connection bootstrap payload.
///
/// Carries a WebRTC session description plus the gathered ICE candidates so
/// two peers can connect by exchanging a single copy/pastable code.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct DirectCodeV1 {
    /// Format version; only `1` is accepted by `decode_code`.
    pub v: u8,
    pub desc: SessionDescription,
    pub candidates: Vec<ICECandidate>,
    /// Optional human-readable label; absent in older codes (serde default).
    #[serde(default)]
    pub label: Option<String>,
}
/// URI scheme prefix for every.channel deep links.
const PREFIX: &str = "every.channel://";
/// Serialize a direct code as JSON and base64url-encode it (no padding).
pub fn encode_code(code: &DirectCodeV1) -> Result<String> {
    let json = serde_json::to_vec(code)?;
    let encoded = URL_SAFE_NO_PAD.encode(json);
    Ok(encoded)
}
/// Parse a base64url direct code, rejecting unsupported versions.
pub fn decode_code(code: &str) -> Result<DirectCodeV1> {
    let raw = URL_SAFE_NO_PAD
        .decode(code.trim())
        .context("invalid base64url code")?;
    let decoded: DirectCodeV1 = serde_json::from_slice(&raw).context("invalid code json")?;
    match decoded.v {
        1 => Ok(decoded),
        other => Err(anyhow!("unsupported direct code version {other}")),
    }
}
/// Build the canonical deep link for an already-encoded direct code.
pub fn build_direct_link(code_b64: &str) -> String {
    let mut link = String::with_capacity("every.channel://direct?c=".len() + code_b64.len());
    link.push_str("every.channel://direct?c=");
    link.push_str(code_b64);
    link
}
/// Encode a direct code and wrap it in the deep-link form.
pub fn encode_direct_link(code: &DirectCodeV1) -> Result<String> {
    encode_code(code).map(|b64| build_direct_link(&b64))
}
/// Decode either a full `every.channel://direct?c=...` link or a bare code.
///
/// Anything without the scheme prefix is treated as a bare code. For links,
/// the path must be `direct` (case-insensitive) and the first `c` query
/// parameter carries the code; empty query pairs are skipped.
pub fn decode_direct_link(link_or_code: &str) -> Result<DirectCodeV1> {
    let s = link_or_code.trim();
    // Idiomatic prefix handling: strip_prefix instead of manual slicing.
    let Some(rest) = s.strip_prefix(PREFIX) else {
        return decode_code(s);
    };
    let (path, query) = rest.split_once('?').ok_or_else(|| anyhow!("missing '?'"))?;
    if !path.eq_ignore_ascii_case("direct") {
        return Err(anyhow!("not a direct link"));
    }
    for pair in query.split('&') {
        let pair = pair.trim();
        if pair.is_empty() {
            continue;
        }
        // A key with no '=' is treated as having an empty value.
        let (k, v) = pair.split_once('=').unwrap_or((pair, ""));
        if k.eq_ignore_ascii_case("c") {
            return decode_code(v);
        }
    }
    Err(anyhow!("missing code parameter"))
}
#[cfg(test)]
mod tests {
    use super::*;
    use just_webrtc::types::SDPType;
    // Encode->decode must be lossless for both the bare code and link forms.
    #[test]
    fn code_roundtrips() {
        let code = DirectCodeV1 {
            v: 1,
            desc: SessionDescription {
                sdp_type: SDPType::Offer,
                sdp: "x".to_string(),
            },
            candidates: vec![ICECandidate {
                candidate: "c".to_string(),
                sdp_mid: Some("0".to_string()),
                sdp_mline_index: Some(0),
                username_fragment: None,
            }],
            label: Some("ec".to_string()),
        };
        let enc = encode_code(&code).unwrap();
        let dec = decode_code(&enc).unwrap();
        assert_eq!(dec, code);
        let link = encode_direct_link(&code).unwrap();
        let dec2 = decode_direct_link(&link).unwrap();
        assert_eq!(dec2, code);
    }
}

View file

@ -0,0 +1,134 @@
use anyhow::{anyhow, Result};
use bytes::Bytes;
use ec_direct::{decode_direct_link, encode_direct_link, DirectCodeV1};
use just_webrtc::types::{
DataChannelOptions, PeerConfiguration, PeerConnectionState, SessionDescription,
};
use just_webrtc::{DataChannelExt, PeerConnectionBuilder, PeerConnectionExt};
/// Poll connection-state changes until the peer reports `Connected`.
///
/// Fails fast on `Failed`/`Closed` and gives up after 20 seconds.
async fn wait_connected(pc: &impl PeerConnectionExt) -> Result<()> {
    tokio::time::timeout(std::time::Duration::from_secs(20), async {
        loop {
            match pc.state_change().await {
                PeerConnectionState::Connected => break Ok(()),
                PeerConnectionState::Failed => break Err(anyhow!("peer connection failed")),
                PeerConnectionState::Closed => break Err(anyhow!("peer connection closed")),
                // Intermediate states: keep waiting for a terminal transition.
                _ => {}
            }
        }
    })
    .await
    .map_err(|_| anyhow!("timed out waiting for peer connection"))?
}
// Ignored by default: WebRTC can be timing-sensitive on some hosts.
/// Full loopback handshake: offer/answer exchanged via the direct-link codec,
/// then bytes sent both ways over the negotiated data channel.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[ignore]
async fn e2e_direct_connect_loopback_sends_bytes() -> Result<()> {
    // Avoid depending on external STUN servers in tests: use host candidates only.
    let cfg = PeerConfiguration {
        ice_servers: vec![],
        ..Default::default()
    };
    let offerer = PeerConnectionBuilder::new()
        .set_config(cfg.clone())
        .with_channel_options(vec![(
            "simple_channel_".to_string(),
            DataChannelOptions::default(),
        )])
        .map_err(|e| anyhow!("{e:#}"))?
        .build()
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    let offer_desc: SessionDescription = offerer
        .get_local_description()
        .await
        .ok_or_else(|| anyhow!("missing offer local description"))?;
    let offer_candidates = offerer
        .collect_ice_candidates()
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    // Round-trip the offer through the link codec — this also exercises
    // encode_direct_link/decode_direct_link as part of the e2e path.
    let offer_link = encode_direct_link(&DirectCodeV1 {
        v: 1,
        desc: offer_desc,
        candidates: offer_candidates,
        label: Some("every.channel0".to_string()),
    })?;
    let offer_code = decode_direct_link(&offer_link)?;
    let answerer = PeerConnectionBuilder::new()
        .set_config(cfg.clone())
        .with_remote_offer(Some(offer_code.desc.clone()))
        .map_err(|e| anyhow!("{e:#}"))?
        .build()
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    answerer
        .add_ice_candidates(offer_code.candidates.clone())
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    let answer_desc = answerer
        .get_local_description()
        .await
        .ok_or_else(|| anyhow!("missing answer local description"))?;
    let answer_candidates = answerer
        .collect_ice_candidates()
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    // The answer travels back through the same link codec.
    let answer_link = encode_direct_link(&DirectCodeV1 {
        v: 1,
        desc: answer_desc,
        candidates: answer_candidates,
        label: Some("every.channel0".to_string()),
    })?;
    let answer_code = decode_direct_link(&answer_link)?;
    offerer
        .set_remote_description(answer_code.desc.clone())
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    offerer
        .add_ice_candidates(answer_code.candidates.clone())
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    // Wait for both peers to report a full connection before waiting for the data channel.
    wait_connected(&offerer).await?;
    wait_connected(&answerer).await?;
    let offerer_ch = offerer
        .receive_channel()
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    let answerer_ch = answerer
        .receive_channel()
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    offerer_ch.wait_ready().await;
    answerer_ch.wait_ready().await;
    let payload = Bytes::from_static(b"hello");
    offerer_ch
        .send(&payload)
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    let got = tokio::time::timeout(std::time::Duration::from_secs(10), answerer_ch.receive())
        .await
        .map_err(|_| anyhow!("timed out waiting for receive"))?
        .map_err(|e| anyhow!("{e:#}"))?;
    assert_eq!(&got[..], b"hello");
    // Confirm the reverse direction works too (this also guards against one-way readiness bugs).
    answerer_ch
        .send(&Bytes::from_static(b"world"))
        .await
        .map_err(|e| anyhow!("{e:#}"))?;
    let got = tokio::time::timeout(std::time::Duration::from_secs(10), offerer_ch.receive())
        .await
        .map_err(|_| anyhow!("timed out waiting for receive"))?
        .map_err(|e| anyhow!("{e:#}"))?;
    assert_eq!(&got[..], b"world");
    Ok(())
}

View file

@ -0,0 +1,14 @@
[package]
name = "ec-hdhomerun"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
ec-core = { path = "../ec-core" }
crc32fast = "1"
hex = "0.4"
serde.workspace = true
serde_json.workspace = true
ureq = { version = "2", default-features = true, features = ["tls"] }

View file

@ -0,0 +1,676 @@
//! HDHomeRun discovery, lineup ingest, and stream scaffolding.
use anyhow::{anyhow, Context, Result};
use ec_core::{Channel, ChannelId, ChannelMetadata, DeviceId};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::io::Read;
use std::net::{Ipv4Addr, SocketAddrV4, UdpSocket};
use std::time::{Duration, Instant};
// HDHomeRun UDP discovery protocol: well-known port, packet types, TLV tags.
const DISCOVER_UDP_PORT: u16 = 65001;
// Packet type: discovery request (broadcast by us).
const TYPE_DISCOVER_REQ: u16 = 0x0002;
// Packet type: discovery reply (sent by devices).
const TYPE_DISCOVER_RPY: u16 = 0x0003;
// TLV tags carried in discovery payloads.
const TAG_DEVICE_TYPE: u8 = 0x01;
const TAG_DEVICE_ID: u8 = 0x02;
const TAG_TUNER_COUNT: u8 = 0x10;
const TAG_DEVICE_AUTH_BIN: u8 = 0x29;
const TAG_BASE_URL: u8 = 0x2A;
const TAG_DEVICE_AUTH_STR: u8 = 0x2B;
// Device-type filter value: tuners only.
const DEVICE_TYPE_TUNER: u32 = 0x00000001;
// Wildcard device id: match every device on the network.
const DEVICE_ID_WILDCARD: u32 = 0xFFFFFFFF;
/// A raw key/value field captured during discovery.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeviceField {
    pub key: String,
    pub value: String,
}
/// Everything known about a discovered HDHomeRun device.
///
/// Populated from the UDP discovery reply and, when reachable, hydrated with
/// the richer `/discover.json` HTTP response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HdhomerunDevice {
    pub id: DeviceId,
    /// Host or IP the device answered from.
    pub ip: String,
    pub tuner_count: u8,
    pub lineup_url: Option<String>,
    pub discover_url: Option<String>,
    pub base_url: Option<String>,
    pub device_auth: Option<String>,
    pub friendly_name: Option<String>,
    pub model_number: Option<String>,
    pub firmware_name: Option<String>,
    pub firmware_version: Option<String>,
    pub device_type: Option<String>,
    /// Raw TLV fields from the UDP reply, kept for debugging.
    pub discovery_tags: Vec<DeviceField>,
    /// Unparsed /discover.json payload when the HTTP fetch succeeded.
    pub raw_discover_json: Option<Value>,
}
/// A normalized lineup row: channel info plus its tuner stream URL.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LineupEntry {
    pub channel: Channel,
    pub stream_url: String,
    pub tags: Vec<String>,
    /// Original lineup.json object for fields we do not model.
    pub raw: Value,
}
/// A live MPEG-TS byte stream from a tuner, plus the URL it was opened from.
pub struct HdhomerunStream {
    pub url: String,
    // Boxed HTTP response body; read until EOF (or the requested duration).
    reader: Box<dyn Read + Send>,
}
// Manual Debug: the boxed reader has no useful representation.
impl std::fmt::Debug for HdhomerunStream {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("HdhomerunStream")
            .field("url", &self.url)
            .finish_non_exhaustive()
    }
}
// Forward reads straight to the underlying HTTP body.
impl Read for HdhomerunStream {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.reader.read(buf)
    }
}
/// Serde view of a device's `/discover.json` response.
///
/// Every field is optional: the set varies across models and firmware.
#[derive(Debug, Clone, Deserialize)]
struct DiscoverJson {
    #[serde(rename = "DeviceID")]
    device_id: Option<String>,
    #[serde(rename = "DeviceAuth")]
    device_auth: Option<String>,
    #[serde(rename = "BaseURL")]
    base_url: Option<String>,
    #[serde(rename = "LineupURL")]
    lineup_url: Option<String>,
    #[serde(rename = "DiscoverURL")]
    discover_url: Option<String>,
    #[serde(rename = "FriendlyName")]
    friendly_name: Option<String>,
    #[serde(rename = "ModelNumber")]
    model_number: Option<String>,
    #[serde(rename = "FirmwareName")]
    firmware_name: Option<String>,
    #[serde(rename = "FirmwareVersion")]
    firmware_version: Option<String>,
    #[serde(rename = "DeviceType")]
    device_type: Option<String>,
    #[serde(rename = "TunerCount")]
    tuner_count: Option<u8>,
}
/// Serde view of one `lineup.json` entry; all fields optional for the same reason.
#[derive(Debug, Clone, Deserialize)]
struct LineupJsonEntry {
    #[serde(rename = "GuideNumber")]
    guide_number: Option<String>,
    #[serde(rename = "GuideName")]
    guide_name: Option<String>,
    #[serde(rename = "Tags")]
    tags: Option<String>,
    #[serde(rename = "URL")]
    url: Option<String>,
}
/// Discover devices using UDP broadcast, then hydrate with /discover.json when possible.
///
/// When broadcast turns up nothing, falls back to the well-known
/// `hdhomerun.local` name; a failed fallback is ignored and yields an empty list.
pub fn discover() -> Result<Vec<HdhomerunDevice>> {
    let mut found = discover_udp(Duration::from_millis(400))?;
    if !found.is_empty() {
        return Ok(found);
    }
    if let Ok(device) = discover_from_host("hdhomerun.local") {
        found.push(device);
    }
    Ok(found)
}
/// Discover a device by hostname or IP using the HTTP discover.json endpoint.
///
/// Missing JSON fields degrade gracefully: the device id defaults to
/// "unknown", and the discover/base URLs fall back to the ones we derived
/// from `host`.
pub fn discover_from_host(host: &str) -> Result<HdhomerunDevice> {
    let base_url = format!("http://{host}");
    let discover_url = format!("{base_url}/discover.json");
    let json = fetch_json(&discover_url)?;
    let discover: DiscoverJson = serde_json::from_value(json.clone())
        .with_context(|| format!("invalid discover.json from {discover_url}"))?;
    let device = HdhomerunDevice {
        id: DeviceId(
            discover
                .device_id
                .clone()
                .unwrap_or_else(|| "unknown".to_string()),
        ),
        ip: host.to_string(),
        tuner_count: discover.tuner_count.unwrap_or(0),
        lineup_url: discover.lineup_url.clone(),
        // Prefer the device-reported URLs; fall back to what we derived.
        discover_url: discover.discover_url.clone().or(Some(discover_url)),
        base_url: discover.base_url.clone().or(Some(base_url)),
        device_auth: discover.device_auth.clone(),
        friendly_name: discover.friendly_name.clone(),
        model_number: discover.model_number.clone(),
        firmware_name: discover.firmware_name.clone(),
        firmware_version: discover.firmware_version.clone(),
        device_type: discover.device_type.clone(),
        // UDP discovery tags are only available via the broadcast path.
        discovery_tags: Vec::new(),
        raw_discover_json: Some(json),
    };
    Ok(device)
}
/// Fetch and normalize lineup information for a device.
pub fn fetch_lineup(device: &HdhomerunDevice) -> Result<Vec<LineupEntry>> {
    let lineup_url = resolve_lineup_url(device)?;
    let json = fetch_json(&lineup_url)?;
    lineup_from_json_value(&json, Some(&device.id))
        .with_context(|| format!("invalid lineup.json from {lineup_url}"))
}
/// Parse a lineup.json file already loaded into memory.
///
/// `device_id` (when known) is used while normalizing entries; pass `None`
/// for lineups captured without a device context.
pub fn lineup_from_json_bytes(
    bytes: &[u8],
    device_id: Option<&DeviceId>,
) -> Result<Vec<LineupEntry>> {
    let json: Value = serde_json::from_slice(bytes)?;
    lineup_from_json_value(&json, device_id)
}
/// Open a raw MPEG-TS stream by channel ID (lineup lookup required).
///
/// Fetches the device's current lineup on every call; prefer
/// [`open_stream_entry`] when the lineup is already in hand.
pub fn open_stream(device: &HdhomerunDevice, channel: &ChannelId) -> Result<HdhomerunStream> {
    let lineup = fetch_lineup(device)?;
    let entry = lineup
        .into_iter()
        .find(|entry| entry.channel.id == *channel)
        .ok_or_else(|| anyhow!("channel {} not found in lineup", channel.0))?;
    open_stream_entry(&entry, None)
}
/// Open a raw MPEG-TS stream from a lineup entry.
///
/// `duration_secs`, when set, asks the tuner to stop after that many seconds
/// (useful for tests and short captures).
pub fn open_stream_entry(
    entry: &LineupEntry,
    duration_secs: Option<u32>,
) -> Result<HdhomerunStream> {
    open_stream_url(&entry.stream_url, duration_secs)
}
/// Open a raw MPEG-TS stream by URL.
///
/// When `duration_secs` is given it is appended as a `duration` query
/// parameter (the tuner stops sending after that long) and a matching read
/// timeout is applied; otherwise the stream is long-lived with no read timeout.
pub fn open_stream_url(url: &str, duration_secs: Option<u32>) -> Result<HdhomerunStream> {
    let url = if let Some(duration) = duration_secs {
        append_query_param(url, "duration", &duration.to_string())
    } else {
        url.to_string()
    };
    // Streams can be long-lived. Only apply read timeout when the caller requests
    // `duration=...` (useful for tests and short captures).
    let mut agent_builder = ureq::AgentBuilder::new().timeout_connect(Duration::from_secs(3));
    if let Some(duration) = duration_secs {
        agent_builder = agent_builder.timeout_read(Duration::from_secs(duration as u64 + 10));
    }
    let agent = agent_builder.build();
    let response = agent
        .get(&url)
        .call()
        .with_context(|| format!("failed to open stream {url}"))?;
    // NOTE(review): ureq 2.x returns Err from `call()` for 4xx/5xx by
    // default, so this guard is likely defensive only — confirm agent config.
    if response.status() < 200 || response.status() >= 300 {
        return Err(anyhow!(
            "stream returned http {} for {}",
            response.status(),
            url
        ));
    }
    Ok(HdhomerunStream {
        url,
        reader: response.into_reader(),
    })
}
/// Find the lineup entry whose guide number matches exactly, if any.
pub fn find_lineup_entry_by_number<'a>(
    lineup: &'a [LineupEntry],
    guide_number: &str,
) -> Option<&'a LineupEntry> {
    lineup.iter().find(|entry| {
        entry
            .channel
            .number
            .as_deref()
            .map_or(false, |number| number == guide_number)
    })
}
/// Find the lineup entry whose guide name matches exactly, if any.
pub fn find_lineup_entry_by_name<'a>(
    lineup: &'a [LineupEntry],
    guide_name: &str,
) -> Option<&'a LineupEntry> {
    for entry in lineup {
        if entry.channel.name == guide_name {
            return Some(entry);
        }
    }
    None
}
/// Broadcast an HDHomeRun discover request and collect every reply received
/// within `timeout`, then best-effort enrich each device from its HTTP
/// `discover.json`.
fn discover_udp(timeout: Duration) -> Result<Vec<HdhomerunDevice>> {
    let socket = UdpSocket::bind((Ipv4Addr::UNSPECIFIED, 0))?;
    socket.set_broadcast(true)?;
    // Short per-recv timeout so the loop re-checks the overall deadline often.
    socket.set_read_timeout(Some(Duration::from_millis(100)))?;
    let packet = build_discover_packet()?;
    let broadcast_addr = SocketAddrV4::new(Ipv4Addr::BROADCAST, DISCOVER_UDP_PORT);
    socket.send_to(&packet, broadcast_addr)?;
    let mut devices = Vec::new();
    let start = Instant::now();
    let mut buf = [0u8; 2048];
    while start.elapsed() < timeout {
        match socket.recv_from(&mut buf) {
            Ok((len, addr)) => {
                // Malformed replies are skipped rather than aborting discovery.
                if let Ok(device) = parse_discover_response(&buf[..len], addr.ip().to_string()) {
                    devices.push(device);
                }
            }
            // A recv timeout surfaces as WouldBlock or TimedOut depending on
            // the platform; both simply mean "keep waiting".
            Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => continue,
            Err(err) if err.kind() == std::io::ErrorKind::TimedOut => continue,
            Err(err) => return Err(err.into()),
        }
    }
    // Best-effort enrichment: an unreachable discover.json is not an error.
    for device in devices.iter_mut() {
        if let Ok(json) = try_fetch_discover_json(&device.ip) {
            apply_discover_json(device, json);
        }
    }
    Ok(devices)
}
/// Build a discover request datagram: a big-endian type + payload-length
/// header, TLVs querying any tuner with any device id, then a little-endian
/// CRC32 over the header and payload.
fn build_discover_packet() -> Result<Vec<u8>> {
    let mut payload = Vec::new();
    payload.extend(tlv(TAG_DEVICE_TYPE, &DEVICE_TYPE_TUNER.to_be_bytes()));
    payload.extend(tlv(TAG_DEVICE_ID, &DEVICE_ID_WILDCARD.to_be_bytes()));
    let mut packet = Vec::with_capacity(4 + payload.len() + 4);
    packet.extend(TYPE_DISCOVER_REQ.to_be_bytes());
    packet.extend((payload.len() as u16).to_be_bytes());
    packet.extend(payload);
    // CRC covers everything emitted so far and is appended little-endian,
    // matching the check in `parse_discover_response`.
    let crc = crc32fast::hash(&packet);
    packet.extend(crc.to_le_bytes());
    Ok(packet)
}
/// Parse a discover reply datagram into a device record.
///
/// Wire format: u16 packet type and u16 payload length (big-endian), a TLV
/// payload, then a little-endian CRC32 over everything before the CRC.
/// Recognized TLVs populate dedicated fields; unknown tags are preserved
/// hex-encoded in `discovery_tags`.
fn parse_discover_response(bytes: &[u8], ip: String) -> Result<HdhomerunDevice> {
    // Minimum size: 4-byte header + 4-byte CRC with an empty payload.
    if bytes.len() < 8 {
        return Err(anyhow!("discover reply too short"));
    }
    let packet_type = u16::from_be_bytes([bytes[0], bytes[1]]);
    if packet_type != TYPE_DISCOVER_RPY {
        return Err(anyhow!("unexpected packet type"));
    }
    let payload_len = u16::from_be_bytes([bytes[2], bytes[3]]) as usize;
    if bytes.len() < 4 + payload_len + 4 {
        return Err(anyhow!("truncated discover reply"));
    }
    let payload = &bytes[4..4 + payload_len];
    // CRC trailer is little-endian and covers header + payload.
    let expected_crc = u32::from_le_bytes([
        bytes[4 + payload_len],
        bytes[4 + payload_len + 1],
        bytes[4 + payload_len + 2],
        bytes[4 + payload_len + 3],
    ]);
    let actual_crc = crc32fast::hash(&bytes[..4 + payload_len]);
    if expected_crc != actual_crc {
        return Err(anyhow!("bad crc"));
    }
    // Walk the TLV payload one record at a time.
    let mut cursor = 0usize;
    let mut device_id: Option<String> = None;
    let mut tuner_count: Option<u8> = None;
    let mut base_url: Option<String> = None;
    let mut device_auth: Option<String> = None;
    let mut tags: Vec<DeviceField> = Vec::new();
    while cursor < payload.len() {
        let tag = payload[cursor];
        cursor += 1;
        let (length, consumed) = read_varlen(&payload[cursor..])?;
        cursor += consumed;
        if cursor + length > payload.len() {
            return Err(anyhow!("discover TLV length overflow"));
        }
        let value = &payload[cursor..cursor + length];
        cursor += length;
        match tag {
            TAG_DEVICE_ID => {
                // 32-bit id rendered as 8 uppercase hex digits; other lengths
                // are ignored.
                if value.len() == 4 {
                    let id = u32::from_be_bytes([value[0], value[1], value[2], value[3]]);
                    device_id = Some(format!("{id:08X}"));
                }
            }
            TAG_TUNER_COUNT => {
                if let Some(first) = value.first() {
                    tuner_count = Some(*first);
                }
            }
            TAG_BASE_URL => {
                // String TLVs may carry a trailing NUL on the wire.
                if let Ok(text) = std::str::from_utf8(value) {
                    base_url = Some(text.trim_end_matches('\0').to_string());
                }
            }
            TAG_DEVICE_AUTH_STR => {
                if let Ok(text) = std::str::from_utf8(value) {
                    device_auth = Some(text.trim_end_matches('\0').to_string());
                }
            }
            TAG_DEVICE_AUTH_BIN => {
                tags.push(DeviceField {
                    key: "device_auth_bin".to_string(),
                    value: hex::encode(value),
                });
            }
            TAG_DEVICE_TYPE => {
                tags.push(DeviceField {
                    key: "device_type".to_string(),
                    value: hex::encode(value),
                });
            }
            other => {
                // Unknown tags are kept verbatim so callers can inspect them.
                tags.push(DeviceField {
                    key: format!("tag_{other:02X}"),
                    value: hex::encode(value),
                });
            }
        }
    }
    let id = device_id.unwrap_or_else(|| "unknown".to_string());
    // HTTP-only fields (lineup/discover URLs, friendly name, firmware, ...)
    // are left empty here; `apply_discover_json` fills them in later.
    let device = HdhomerunDevice {
        id: DeviceId(id),
        ip,
        tuner_count: tuner_count.unwrap_or(0),
        lineup_url: None,
        discover_url: None,
        base_url,
        device_auth,
        friendly_name: None,
        model_number: None,
        firmware_name: None,
        firmware_version: None,
        device_type: None,
        discovery_tags: tags,
        raw_discover_json: None,
    };
    Ok(device)
}
/// Decode a TLV length field, returning `(length, bytes_consumed)`.
///
/// Lengths up to 0x7F are a single byte; otherwise two bytes are used with
/// the low 7 bits (plus a continuation flag) first and the high bits second.
fn read_varlen(buf: &[u8]) -> Result<(usize, usize)> {
    match buf {
        [] => Err(anyhow!("missing varlen")),
        [first, ..] if first & 0x80 == 0 => Ok((*first as usize, 1)),
        [_] => Err(anyhow!("missing varlen second byte")),
        [first, second, ..] => {
            let len = ((first & 0x7F) as usize) | ((*second as usize) << 7);
            Ok((len, 2))
        }
    }
}
/// Encode one TLV record: tag byte, varlen-encoded length, then the value.
fn tlv(tag: u8, value: &[u8]) -> Vec<u8> {
    let mut encoded = Vec::with_capacity(2 + value.len());
    encoded.push(tag);
    encoded.extend(encode_varlen(value.len()));
    encoded.extend_from_slice(value);
    encoded
}
/// Encode a TLV length: one byte for values up to 0x7F, otherwise two bytes
/// with the low 7 bits (plus a continuation flag) first and the remaining
/// bits second — the inverse of `read_varlen`.
fn encode_varlen(len: usize) -> Vec<u8> {
    match len {
        0..=0x7F => vec![len as u8],
        _ => vec![((len & 0x7F) as u8) | 0x80, (len >> 7) as u8],
    }
}
/// GET `url` and parse the body as JSON.
///
/// Uses short connect/read timeouts because this fetches small control
/// documents (discover.json / lineup.json), never media streams.
///
/// # Errors
/// Fails on transport errors, non-2xx statuses, unreadable bodies, or bodies
/// that are not valid JSON; each case carries the URL in its context.
fn fetch_json(url: &str) -> Result<Value> {
    let agent = ureq::AgentBuilder::new()
        .timeout_connect(Duration::from_secs(3))
        .timeout_read(Duration::from_secs(6))
        .build();
    let response = agent
        .get(url)
        .call()
        .with_context(|| format!("request failed for {url}"))?;
    if response.status() < 200 || response.status() >= 300 {
        return Err(anyhow!("http {} for {url}", response.status()));
    }
    let mut body = String::new();
    response
        .into_reader()
        .read_to_string(&mut body)
        .with_context(|| format!("failed to read response body for {url}"))?;
    // Return the parse result directly; the previous `Ok(expr?)` wrapper was
    // redundant (clippy: needless_question_mark).
    serde_json::from_str::<Value>(&body).with_context(|| format!("invalid json body for {url}"))
}
/// Fetch `http://<host>/discover.json` as parsed JSON.
fn try_fetch_discover_json(host: &str) -> Result<Value> {
    fetch_json(&format!("http://{host}/discover.json"))
}
/// Merge fields from a `discover.json` document into an already-discovered
/// device, preferring the JSON's values and keeping whatever UDP discovery
/// filled in when the JSON omits a field.
fn apply_discover_json(device: &mut HdhomerunDevice, json: Value) {
    // The clone is required because the raw document is also stored verbatim
    // at the end of this function.
    if let Ok(discover) = serde_json::from_value::<DiscoverJson>(json.clone()) {
        if let Some(device_id) = discover.device_id {
            device.id = DeviceId(device_id);
        }
        if let Some(tuner_count) = discover.tuner_count {
            device.tuner_count = tuner_count;
        }
        // Pattern: `json_value.or(field.take())` — the JSON value wins when
        // present, otherwise the old value (moved out via `take`, avoiding a
        // clone) is put back.
        device.lineup_url = discover.lineup_url.or(device.lineup_url.take());
        device.discover_url = discover.discover_url.or(device.discover_url.take());
        device.base_url = discover.base_url.or(device.base_url.take());
        device.device_auth = discover.device_auth.or(device.device_auth.take());
        device.friendly_name = discover.friendly_name.or(device.friendly_name.take());
        device.model_number = discover.model_number.or(device.model_number.take());
        device.firmware_name = discover.firmware_name.or(device.firmware_name.take());
        device.firmware_version = discover.firmware_version.or(device.firmware_version.take());
        device.device_type = discover.device_type.or(device.device_type.take());
    }
    // Always retain the raw document, even when it failed to deserialize.
    device.raw_discover_json = Some(json);
}
/// Determine the lineup.json URL for a device, in priority order: an explicit
/// lineup URL, then the advertised base URL, then the raw IP address.
fn resolve_lineup_url(device: &HdhomerunDevice) -> Result<String> {
    match (&device.lineup_url, &device.base_url, device.ip.as_str()) {
        (Some(lineup_url), _, _) => Ok(lineup_url.clone()),
        (None, Some(base_url), _) => Ok(format!("{base_url}/lineup.json")),
        (None, None, ip) if !ip.is_empty() => Ok(format!("http://{ip}/lineup.json")),
        _ => Err(anyhow!("no lineup URL available")),
    }
}
/// Append `key=value` to a URL, using `?` or `&` depending on whether the URL
/// already carries a query string.
fn append_query_param(url: &str, key: &str, value: &str) -> String {
    let separator = if url.contains('?') { '&' } else { '?' };
    format!("{url}{separator}{key}={value}")
}
/// Normalize a lineup.json array into `LineupEntry` values.
///
/// Channel IDs are namespaced as `hdhr:<device_id>:<guide_number>` when both
/// parts are known, degrade to the bare guide number when only it is known,
/// and fall back to a positional `hdhr:unknown:<index>` otherwise. Unknown
/// JSON keys are preserved as extra channel metadata.
///
/// # Errors
/// Fails when the document is not an array or an element does not match the
/// expected lineup entry shape.
fn lineup_from_json_value(json: &Value, device_id: Option<&DeviceId>) -> Result<Vec<LineupEntry>> {
    let entries = json
        .as_array()
        .ok_or_else(|| anyhow!("lineup json is not an array"))?;
    let mut output = Vec::with_capacity(entries.len());
    for (index, entry) in entries.iter().enumerate() {
        let parsed: LineupJsonEntry = serde_json::from_value(entry.clone())
            .with_context(|| format!("invalid lineup entry at index {index}"))?;
        let guide_number = parsed.guide_number.clone();
        // Display-name preference: GuideName, then GuideNumber, then a
        // positional placeholder.
        let guide_name = parsed
            .guide_name
            .clone()
            .or_else(|| guide_number.clone())
            .unwrap_or_else(|| format!("Channel {index}"));
        // "Tags" is a comma-separated string; empty segments are dropped.
        let tags = parsed
            .tags
            .unwrap_or_default()
            .split(',')
            .map(|s| s.trim().to_string())
            .filter(|s| !s.is_empty())
            .collect::<Vec<_>>();
        // `unwrap_or_default` replaces the former `|| "".to_string()` closure.
        let url = parsed.url.clone().unwrap_or_default();
        let id = match (device_id, guide_number.as_ref()) {
            (Some(device_id), Some(guide_number)) => {
                ChannelId(format!("hdhr:{}:{}", device_id.0, guide_number))
            }
            (_, Some(guide_number)) => ChannelId(guide_number.clone()),
            (_, None) => ChannelId(format!("hdhr:unknown:{index}")),
        };
        let mut metadata = Vec::new();
        for tag in &tags {
            metadata.push(ChannelMetadata::Extra("tag".to_string(), tag.clone()));
        }
        if let Some(guide_number) = guide_number.clone() {
            metadata.push(ChannelMetadata::Extra(
                "guide_number".to_string(),
                guide_number,
            ));
        }
        // Preserve any keys beyond the well-known four as extra metadata.
        if let Some(obj) = entry.as_object() {
            for (key, value) in obj.iter() {
                if key == "GuideNumber" || key == "GuideName" || key == "Tags" || key == "URL" {
                    continue;
                }
                metadata.push(ChannelMetadata::Extra(key.clone(), value.to_string()));
            }
        }
        let channel = Channel {
            id,
            name: guide_name,
            number: parsed.guide_number,
            program_id: None,
            metadata,
        };
        output.push(LineupEntry {
            channel,
            stream_url: url,
            tags,
            raw: entry.clone(),
        });
    }
    Ok(output)
}
#[cfg(test)]
mod tests {
    use super::*;
    // encode_varlen/read_varlen must agree across both the one-byte and
    // two-byte encodings, including the 0x7F/0x80 boundary.
    #[test]
    fn varlen_roundtrip_small_and_large() {
        for len in [0usize, 1, 10, 127, 128, 200, 1024] {
            let enc = encode_varlen(len);
            let (decoded, consumed) = read_varlen(&enc).unwrap();
            assert_eq!(decoded, len);
            assert_eq!(consumed, enc.len());
        }
    }
    // Builds a syntactically valid discover reply (correct CRC) and checks
    // that every recognized TLV lands on the device struct and unknown tags
    // are preserved.
    #[test]
    fn parse_discover_response_happy_path() {
        let device_id = 0x10ACEBB9u32;
        let ip = "192.0.2.10"; // RFC 5737 TEST-NET-1
        let mut payload = Vec::new();
        payload.extend(tlv(TAG_DEVICE_ID, &device_id.to_be_bytes()));
        payload.extend(tlv(TAG_TUNER_COUNT, &[4u8]));
        payload.extend(tlv(TAG_BASE_URL, b"http://192.0.2.10\0"));
        payload.extend(tlv(TAG_DEVICE_AUTH_STR, b"auth-token\0"));
        payload.extend(tlv(0x99, b"unknown"));
        let mut packet = Vec::new();
        packet.extend(TYPE_DISCOVER_RPY.to_be_bytes());
        packet.extend((payload.len() as u16).to_be_bytes());
        packet.extend(&payload);
        let crc = crc32fast::hash(&packet);
        packet.extend(crc.to_le_bytes());
        let dev = parse_discover_response(&packet, ip.to_string()).unwrap();
        assert_eq!(dev.id.0, "10ACEBB9");
        assert_eq!(dev.ip, ip);
        assert_eq!(dev.tuner_count, 4);
        assert_eq!(dev.base_url.as_deref(), Some("http://192.0.2.10"));
        assert_eq!(dev.device_auth.as_deref(), Some("auth-token"));
        assert!(dev.discovery_tags.iter().any(|t| t.key == "tag_99"));
    }
    // A single flipped bit in the CRC trailer must reject the whole packet.
    #[test]
    fn parse_discover_response_rejects_bad_crc() {
        let mut payload = Vec::new();
        payload.extend(tlv(TAG_TUNER_COUNT, &[2u8]));
        let mut packet = Vec::new();
        packet.extend(TYPE_DISCOVER_RPY.to_be_bytes());
        packet.extend((payload.len() as u16).to_be_bytes());
        packet.extend(&payload);
        let crc = crc32fast::hash(&packet);
        packet.extend(crc.to_le_bytes());
        // corrupt the last byte
        *packet.last_mut().unwrap() ^= 0xFF;
        assert!(parse_discover_response(&packet, "1.2.3.4".to_string()).is_err());
    }
    // Covers ID namespacing, tag splitting (trailing comma / empty string),
    // and passthrough of unknown JSON keys ("Foo") as extra metadata.
    #[test]
    fn lineup_parsing_generates_channel_ids_and_metadata() {
        let device_id = DeviceId("ABCDEF01".to_string());
        let json = serde_json::json!([
            {
                "GuideNumber": "2.1",
                "GuideName": "KCBS-HD",
                "Tags": "drm,encrypted,",
                "URL": "http://hdhr/auto/v2.1",
                "Foo": "Bar"
            },
            {
                "GuideNumber": "2.2",
                "GuideName": "StartTV",
                "Tags": "",
                "URL": "http://hdhr/auto/v2.2"
            }
        ]);
        let entries = lineup_from_json_value(&json, Some(&device_id)).unwrap();
        assert_eq!(entries.len(), 2);
        assert_eq!(entries[0].channel.id.0, "hdhr:ABCDEF01:2.1");
        assert_eq!(entries[0].channel.name, "KCBS-HD");
        assert_eq!(entries[0].channel.number.as_deref(), Some("2.1"));
        assert_eq!(entries[0].stream_url, "http://hdhr/auto/v2.1");
        assert!(entries[0].tags.iter().any(|t| t == "drm"));
        assert!(entries[0].channel.metadata.iter().any(|m| match m {
            ChannelMetadata::Extra(key, value) => key == "guide_number" && value == "2.1",
            _ => false,
        }));
        assert!(entries[0].channel.metadata.iter().any(|m| match m {
            ChannelMetadata::Extra(key, _) => key == "Foo",
            _ => false,
        }));
    }
}

16
crates/ec-iroh/Cargo.toml Normal file
View file

@ -0,0 +1,16 @@
[package]
name = "ec-iroh"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
blake3 = "1"
bytes = "1"
ec-core = { path = "../ec-core" }
futures-lite = "2"
iroh = { version = "0.96", features = ["address-lookup-mdns", "address-lookup-pkarr-dht"] }
iroh-gossip = { path = "../../third_party/iroh-org/iroh-gossip", features = ["net"] }
serde_json.workspace = true
tokio = { version = "1", features = ["time"] }

328
crates/ec-iroh/src/lib.rs Normal file
View file

@ -0,0 +1,328 @@
//! iroh transport scaffolding for every.channel.
use anyhow::{Context, Result};
use bytes::Bytes;
use ec_core::StreamCatalogEntry;
use futures_lite::StreamExt;
use iroh::address_lookup::{
DhtAddressLookup, DiscoveryEvent, DnsAddressLookup, MdnsAddressLookup, PkarrPublisher, UserData,
};
use iroh::endpoint::RelayMode;
use iroh::{
address_lookup::memory::MemoryLookup, protocol::Router, Endpoint, EndpointAddr, PublicKey,
SecretKey,
};
use iroh_gossip::{
api::{Event, GossipReceiver, GossipSender},
net::{Gossip, GOSSIP_ALPN},
proto::TopicId,
};
use std::collections::BTreeMap;
use std::env;
use std::time::{Duration, Instant};
/// ALPN identifier registered for every.channel's MoQ streams (set on bound
/// endpoints in `build_endpoint`).
pub const ALPN_MOQ: &[u8] = b"every.channel/moq/0";
/// Well-known topic string whose blake3 hash becomes the catalog `TopicId`
/// (see `catalog_topic`).
pub const DEFAULT_CATALOG_TOPIC: &str = "every.channel/catalog/v1";
/// Marker string for mDNS user data.
/// NOTE(review): not referenced elsewhere in this file — presumably passed to
/// `MdnsDiscovery::start` by callers; confirm.
pub const MDNS_USER_DATA: &str = "every.channel";
/// A classic token bucket: starts full, drains on `allow`, and refills
/// continuously at a fixed rate up to its capacity.
#[derive(Debug, Clone)]
pub struct TokenBucket {
    capacity: u64,
    tokens: f64,
    refill_per_sec: f64,
    last_refill: Instant,
}
impl TokenBucket {
    /// Create a bucket holding at most `capacity` tokens (clamped to >= 1),
    /// starting full, refilling at `refill_per_sec` tokens per second (also
    /// clamped to >= 1).
    pub fn new(capacity: u64, refill_per_sec: u64) -> Self {
        let capacity = capacity.max(1);
        Self {
            capacity,
            tokens: capacity as f64,
            refill_per_sec: refill_per_sec.max(1) as f64,
            last_refill: Instant::now(),
        }
    }
    /// Try to consume `amount` tokens; returns false when the bucket does not
    /// currently hold enough (nothing is consumed in that case).
    pub fn allow(&mut self, amount: u64) -> bool {
        self.refill();
        let requested = amount as f64;
        let granted = requested <= self.tokens;
        if granted {
            self.tokens -= requested;
        }
        granted
    }
    /// Top the bucket up according to wall-clock time since the last refill,
    /// capping at `capacity`.
    fn refill(&mut self) {
        let now = Instant::now();
        let elapsed = now.duration_since(self.last_refill).as_secs_f64();
        if elapsed > 0.0 {
            let replenished = self.tokens + elapsed * self.refill_per_sec;
            self.tokens = replenished.min(self.capacity as f64);
            self.last_refill = now;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Drains the bucket exactly, then rewinds `last_refill` to simulate one
    // second of elapsed time instead of sleeping.
    #[test]
    fn token_bucket_allows_and_refills() {
        let mut bucket = TokenBucket::new(10, 10);
        assert!(bucket.allow(7));
        assert!(bucket.allow(3));
        assert!(!bucket.allow(1));
        // Force a refill without sleeping.
        bucket.last_refill = Instant::now() - Duration::from_secs(1);
        assert!(bucket.allow(1));
    }
}
/// Which iroh address-lookup mechanisms to enable; everything is off by
/// default.
#[derive(Debug, Clone, Copy, Default)]
pub struct DiscoveryConfig {
    pub dht: bool,
    pub mdns: bool,
    pub dns: bool,
}
impl DiscoveryConfig {
    /// Read the configuration from `EVERY_CHANNEL_IROH_DISCOVERY`; an unset
    /// variable yields the all-off default.
    pub fn from_env() -> Result<Self> {
        match env::var("EVERY_CHANNEL_IROH_DISCOVERY") {
            Ok(value) => Self::from_list(&value),
            Err(env::VarError::NotPresent) => Ok(Self::default()),
            Err(err) => Err(err.into()),
        }
    }
    /// Parse a list of modes separated by commas, semicolons, or whitespace.
    /// `all` enables everything, `none`/`off` resets the flags accumulated so
    /// far, and any unknown token is an error.
    pub fn from_list(value: &str) -> Result<Self> {
        let tokens = value
            .split(|c: char| c == ',' || c == ';' || c.is_whitespace())
            .map(|raw| raw.trim().to_ascii_lowercase())
            .filter(|token| !token.is_empty());
        let mut config = DiscoveryConfig::default();
        for token in tokens {
            match token.as_str() {
                "dht" => config.dht = true,
                "mdns" => config.mdns = true,
                "dns" => config.dns = true,
                "all" => {
                    config = DiscoveryConfig {
                        dht: true,
                        mdns: true,
                        dns: true,
                    };
                }
                "none" | "off" => config = DiscoveryConfig::default(),
                _ => return Err(anyhow::anyhow!("unknown discovery mode: {token}")),
            }
        }
        Ok(config)
    }
}
/// Bind an iroh endpoint with the requested address-lookup mechanisms, an
/// optional fixed secret key (to pin the endpoint identity), and the relay
/// mode taken from the environment. After binding, only the MoQ ALPN is
/// accepted.
pub async fn build_endpoint(
    secret: Option<SecretKey>,
    discovery: DiscoveryConfig,
) -> Result<Endpoint> {
    // NOTE(review): `unwrap_or` discards the Err that `relay_mode_from_env`
    // returns for unrecognized values, silently falling back to the default
    // relays — confirm that swallowing the error is intended here.
    let relay_mode = relay_mode_from_env().unwrap_or(RelayMode::Default);
    let mut builder = Endpoint::empty_builder(relay_mode);
    if let Some(secret) = secret {
        builder = builder.secret_key(secret);
    }
    // DNS mode both publishes (pkarr) and resolves via n0's DNS service.
    if discovery.dns {
        builder = builder
            .address_lookup(PkarrPublisher::n0_dns())
            .address_lookup(DnsAddressLookup::n0_dns());
    }
    if discovery.dht {
        builder = builder.address_lookup(DhtAddressLookup::builder());
    }
    if discovery.mdns {
        builder = builder.address_lookup(MdnsAddressLookup::builder());
    }
    let endpoint = builder.bind().await?;
    endpoint.set_alpns(vec![ALPN_MOQ.to_vec()]);
    Ok(endpoint)
}
/// Map `EVERY_CHANNEL_IROH_RELAY` onto a `RelayMode`: unset, empty, or
/// "default" selects the default relays; "disabled"/"off" turns relaying off;
/// any other value is an error.
fn relay_mode_from_env() -> Result<RelayMode> {
    let raw = match env::var("EVERY_CHANNEL_IROH_RELAY") {
        Ok(value) => value,
        Err(env::VarError::NotPresent) => return Ok(RelayMode::Default),
        Err(err) => return Err(err.into()),
    };
    let normalized = raw.trim().to_ascii_lowercase();
    match normalized.as_str() {
        "" | "default" => Ok(RelayMode::Default),
        "disabled" | "off" => Ok(RelayMode::Disabled),
        other => Err(anyhow::anyhow!("unknown relay mode: {other}")),
    }
}
/// Bind an endpoint with discovery settings taken from the environment and no
/// fixed secret key.
pub async fn start_endpoint() -> Result<Endpoint> {
    build_endpoint(None, DiscoveryConfig::from_env()?).await
}
/// Derive the gossip `TopicId` for the shared catalog by hashing the
/// well-known topic string with blake3.
pub fn catalog_topic() -> TopicId {
    TopicId::from_bytes(*blake3::hash(DEFAULT_CATALOG_TOPIC.as_bytes()).as_bytes())
}
/// Parse a peer address from either a JSON-encoded `EndpointAddr` (detected
/// by a leading `{`) or a bare public-key string.
pub fn parse_endpoint_addr(value: &str) -> Result<EndpointAddr> {
    let value = value.trim();
    if value.starts_with('{') {
        serde_json::from_str::<EndpointAddr>(value).context("invalid EndpointAddr json")
    } else {
        let id = value.parse::<PublicKey>().context("invalid endpoint id")?;
        Ok(EndpointAddr::new(id))
    }
}
/// Wrapper around iroh's mDNS address lookup that can both advertise this
/// endpoint and collect nearby peers.
#[derive(Debug, Clone)]
pub struct MdnsDiscovery {
    mdns: MdnsAddressLookup,
    /// Our own endpoint id, used to skip self-discovery events.
    endpoint_id: PublicKey,
    /// Optional marker; when set, only peers advertising the same user data
    /// are reported by `discover_peers`.
    user_data: Option<UserData>,
}
impl MdnsDiscovery {
    /// Start mDNS lookup on `endpoint`, optionally advertising ourselves and
    /// optionally tagging/filtering with a user-data string.
    pub async fn start(
        endpoint: &Endpoint,
        user_data: Option<&str>,
        advertise: bool,
    ) -> Result<Self> {
        let mdns = MdnsAddressLookup::builder()
            .advertise(advertise)
            .build(endpoint.id())
            .context("mdns address lookup failed")?;
        endpoint.address_lookup().add(mdns.clone());
        let user_data = if let Some(value) = user_data {
            let data = UserData::try_from(value.to_string()).context("invalid mdns user data")?;
            // Publish the marker so peers filtering on it can recognize us.
            endpoint.set_user_data_for_address_lookup(Some(data.clone()));
            Some(data)
        } else {
            None
        };
        Ok(Self {
            mdns,
            endpoint_id: endpoint.id(),
            user_data,
        })
    }
    /// Collect peers discovered within `timeout`, de-duplicated by endpoint id
    /// (a later event for the same id replaces the earlier address). Self and
    /// peers with mismatched user data are skipped.
    pub async fn discover_peers(&self, timeout: Duration) -> Result<Vec<EndpointAddr>> {
        let mut stream = self.mdns.subscribe().await;
        let deadline = Instant::now() + timeout;
        let mut peers: BTreeMap<PublicKey, EndpointAddr> = BTreeMap::new();
        loop {
            let now = Instant::now();
            if now >= deadline {
                break;
            }
            // Bound each wait by the time remaining until the deadline.
            let remaining = deadline - now;
            match tokio::time::timeout(remaining, stream.next()).await {
                Ok(Some(DiscoveryEvent::Discovered { endpoint_info, .. })) => {
                    if endpoint_info.endpoint_id == self.endpoint_id {
                        continue;
                    }
                    if let Some(expected) = self.user_data.as_ref() {
                        if endpoint_info.data.user_data() != Some(expected) {
                            continue;
                        }
                    }
                    let addr = EndpointAddr::from(endpoint_info);
                    peers.insert(addr.id, addr);
                }
                // Expiry events are ignored: peers already recorded stay in
                // the result even if they later expire within the window.
                Ok(Some(DiscoveryEvent::Expired { .. })) => {}
                // Stream ended, or the wait timed out at the deadline.
                Ok(None) => break,
                Err(_) => break,
            }
        }
        Ok(peers.into_values().collect())
    }
}
/// A live gossip subscription on the shared catalog topic. The router, gossip
/// handler, and memory lookup are retained (underscore fields) so they live
/// exactly as long as the subscription.
#[derive(Debug)]
pub struct CatalogGossip {
    sender: GossipSender,
    receiver: GossipReceiver,
    _router: Router,
    _gossip: Gossip,
    _memory_lookup: MemoryLookup,
}
impl CatalogGossip {
    /// Join the catalog topic, seeding the endpoint's address book with the
    /// given bootstrap peers (JSON `EndpointAddr`s or bare public keys, as
    /// accepted by `parse_endpoint_addr`).
    pub async fn join(endpoint: Endpoint, peers: &[String]) -> Result<Self> {
        let memory_lookup = MemoryLookup::new();
        endpoint.address_lookup().add(memory_lookup.clone());
        let gossip = Gossip::builder().spawn(endpoint.clone());
        let router = Router::builder(endpoint.clone())
            .accept(GOSSIP_ALPN, gossip.clone())
            .spawn();
        let peer_addrs = peers
            .iter()
            .map(|peer| parse_endpoint_addr(peer))
            .collect::<Result<Vec<_>, _>>()
            .context("failed to parse gossip peer addr")?;
        // Register known addresses in the in-memory lookup so the join below
        // can dial them without external discovery.
        for peer in &peer_addrs {
            memory_lookup.add_endpoint_info(peer.clone());
        }
        let peer_ids = peer_addrs
            .iter()
            .map(|addr| addr.id)
            .collect::<Vec<PublicKey>>();
        let (sender, receiver) = gossip
            .subscribe_and_join(catalog_topic(), peer_ids)
            .await?
            .split();
        Ok(Self {
            sender,
            receiver,
            _router: router,
            _gossip: gossip,
            _memory_lookup: memory_lookup,
        })
    }
    /// Broadcast one catalog entry to the topic as JSON.
    pub async fn announce(&mut self, entry: StreamCatalogEntry) -> Result<()> {
        let bytes = serde_json::to_vec(&entry)?;
        self.sender.broadcast(Bytes::from(bytes)).await?;
        Ok(())
    }
    /// Wait for the next gossip message that decodes as a catalog entry.
    /// Messages that fail to decode are skipped; `None` means the underlying
    /// event stream ended.
    pub async fn next_entry(&mut self) -> Result<Option<StreamCatalogEntry>> {
        while let Some(event) = self.receiver.try_next().await? {
            if let Event::Received(msg) = event {
                if let Ok(entry) = serde_json::from_slice::<StreamCatalogEntry>(&msg.content) {
                    return Ok(Some(entry));
                }
            }
        }
        Ok(None)
    }
    /// Add peers after the gossip topic has already been joined. This enables
    /// "nearby" discovery to continuously contribute new peers over time.
    pub fn add_peers(&self, peers: Vec<EndpointAddr>) {
        for peer in peers {
            self._memory_lookup.add_endpoint_info(peer);
        }
    }
}

View file

@ -0,0 +1,9 @@
[package]
name = "ec-linux-iptv"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
serde.workspace = true

View file

@ -0,0 +1,292 @@
//! Linux IPTV (LinuxDVB) ingest scaffolding.
use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};
use std::collections::BTreeSet;
use std::fs;
use std::fs::File;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process::Child;
#[cfg(target_os = "linux")]
use std::{process::Command, time::Duration};
/// Configuration for opening a LinuxDVB DVR device.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LinuxDvbConfig {
    /// `/dev/dvb/adapterN` index.
    pub adapter: u32,
    /// Frontend index. NOTE(review): not read by `open_stream` in this module
    /// (only the DVR node is opened) — presumably consumed by callers or the
    /// tune command; confirm.
    pub frontend: u32,
    /// `dvrN` index within the adapter directory.
    pub dvr: u32,
    /// Optional external tune command (argv form, e.g. dvbv5-zap) spawned
    /// before opening the DVR node.
    pub tune_command: Option<Vec<String>>,
    /// Milliseconds to wait after spawning the tune command (see
    /// `spawn_tune_command`).
    pub tune_timeout_ms: Option<u64>,
}
/// An open DVR device stream.
#[derive(Debug)]
pub struct LinuxDvbStream {
    file: File,
    // Held only so the tuner process stays alive for the stream's lifetime.
    // NOTE(review): dropping this struct does not kill the child — the tuner
    // process may outlive the stream; confirm that is intended.
    _tuner: Option<Child>,
    pub path: PathBuf,
}
impl Read for LinuxDvbStream {
    // Reads bytes straight from the DVR device file.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.file.read(buf)
    }
}
/// Open the Linux DVB DVR device, optionally spawning a tune command (such as
/// dvbv5-zap) first and keeping it alive alongside the returned stream.
#[cfg(target_os = "linux")]
pub fn open_stream(config: &LinuxDvbConfig) -> Result<LinuxDvbStream> {
    let tuner = match config.tune_command.clone() {
        Some(command) => spawn_tune_command(command, config.tune_timeout_ms)?,
        None => None,
    };
    let device_path = dvb_path(config.adapter, config.dvr);
    let file = File::open(&device_path)
        .map_err(|err| anyhow!("failed to open {}: {err}", device_path.display()))?;
    Ok(LinuxDvbStream {
        file,
        _tuner: tuner,
        path: device_path,
    })
}
// Stub for non-Linux targets: the /dev/dvb device tree only exists on Linux.
#[cfg(not(target_os = "linux"))]
pub fn open_stream(_config: &LinuxDvbConfig) -> Result<LinuxDvbStream> {
    Err(anyhow!("Linux DVB support requires Linux"))
}
/// Summary of one `/dev/dvb` adapter: which DVR and frontend nodes exist.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LinuxDvbAdapterInfo {
    /// Adapter index parsed from the `adapterN` directory name.
    pub adapter: u32,
    /// Sorted, de-duplicated `dvrN` indices.
    pub dvrs: Vec<u32>,
    /// Sorted, de-duplicated `frontendN` indices.
    pub frontends: Vec<u32>,
}
/// Enumerate DVB adapters under the standard `/dev/dvb` device tree.
pub fn list_adapters() -> Result<Vec<LinuxDvbAdapterInfo>> {
    list_adapters_in(Path::new("/dev/dvb"))
}
/// Enumerate DVB adapters under `root`, tolerating a missing root directory
/// (returns an empty list). Output is sorted by adapter index; DVR and
/// frontend indices are each sorted and de-duplicated.
fn list_adapters_in(root: &Path) -> Result<Vec<LinuxDvbAdapterInfo>> {
    if !root.exists() {
        return Ok(Vec::new());
    }
    let mut adapters = Vec::new();
    for entry in fs::read_dir(root)? {
        let entry = entry?;
        if !entry.file_type()?.is_dir() {
            continue;
        }
        let name = entry.file_name();
        let name = name.to_string_lossy();
        // `strip_prefix` (rather than the previous `trim_start_matches`)
        // removes the prefix exactly once, so a pathological name like
        // "adapteradapter5" is rejected instead of being parsed as 5.
        let Some(adapter) = name
            .strip_prefix("adapter")
            .and_then(|suffix| suffix.parse::<u32>().ok())
        else {
            continue;
        };
        let path = entry.path();
        // BTreeSet yields sorted, de-duplicated device indices.
        let mut dvrs = BTreeSet::new();
        let mut frontends = BTreeSet::new();
        for dev in fs::read_dir(&path)? {
            let dev = dev?;
            let dev_name = dev.file_name().to_string_lossy().to_string();
            if let Some(idx) = dev_name
                .strip_prefix("dvr")
                .and_then(|s| s.parse::<u32>().ok())
            {
                dvrs.insert(idx);
            } else if let Some(idx) = dev_name
                .strip_prefix("frontend")
                .and_then(|s| s.parse::<u32>().ok())
            {
                frontends.insert(idx);
            }
        }
        adapters.push(LinuxDvbAdapterInfo {
            adapter,
            dvrs: dvrs.into_iter().collect(),
            frontends: frontends.into_iter().collect(),
        });
    }
    adapters.sort_by_key(|info| info.adapter);
    Ok(adapters)
}
/// Candidate channels.conf locations, most-preferred first.
///
/// A non-empty `EVERY_CHANNEL_DVB_CHANNELS_CONF` path is returned as the sole
/// candidate (trimmed); otherwise the per-user locations are listed before
/// the system-wide `/etc/dvb/channels.conf`.
pub fn channels_conf_candidates() -> Vec<PathBuf> {
    // An explicit path keeps the lookup deterministic and testable.
    if let Ok(value) = std::env::var("EVERY_CHANNEL_DVB_CHANNELS_CONF") {
        let trimmed = value.trim();
        if !trimmed.is_empty() {
            return vec![PathBuf::from(trimmed)];
        }
    }
    let mut candidates = Vec::new();
    if let Some(home) = std::env::var("HOME").ok().map(PathBuf::from) {
        candidates.push(home.join(".dvb").join("channels.conf"));
        candidates.push(home.join(".config").join("dvb").join("channels.conf"));
    }
    candidates.push(PathBuf::from("/etc/dvb/channels.conf"));
    candidates
}
/// Return the first candidate channels.conf path that exists on disk, if any.
pub fn find_channels_conf() -> Option<PathBuf> {
    channels_conf_candidates()
        .into_iter()
        .find(|candidate| candidate.exists())
}
/// Extract the sorted, de-duplicated channel names from a channels.conf file.
/// The name is the first `:`-separated field; blank lines, comments, and
/// lines without a `:` are skipped.
pub fn parse_channels_conf(path: &Path) -> Result<Vec<String>> {
    let text = fs::read_to_string(path)
        .map_err(|err| anyhow!("failed to read {}: {err}", path.display()))?;
    let names: BTreeSet<String> = text
        .lines()
        .map(str::trim)
        .filter(|line| !line.is_empty() && !line.starts_with('#'))
        .filter_map(|line| line.split_once(':'))
        .map(|(name, _)| name.trim())
        .filter(|name| !name.is_empty())
        .map(str::to_string)
        .collect();
    Ok(names.into_iter().collect())
}
/// Build the default dvbv5-zap invocation (argv form) for tuning `channel`
/// on `adapter` using the given channels.conf.
pub fn default_zap_tune_command(adapter: u32, channels_conf: &Path, channel: &str) -> Vec<String> {
    let mut cmd = vec!["dvbv5-zap".to_string()];
    cmd.push("-a".to_string());
    cmd.push(adapter.to_string());
    cmd.push("-c".to_string());
    cmd.push(channels_conf.display().to_string());
    cmd.push("-r".to_string());
    cmd.push(channel.to_string());
    cmd
}
#[cfg(test)]
mod tests {
    use super::*;
    // Comments are skipped, duplicate names collapse, and output is sorted.
    #[test]
    fn parse_channels_conf_extracts_names() {
        let dir = std::env::temp_dir().join(format!("ec-channels-{}", std::process::id()));
        let _ = fs::create_dir_all(&dir);
        let path = dir.join("channels.conf");
        fs::write(
            &path,
            "\
# comment
KQED:foo
KQED:duplicate
KCBS-HD:bar
",
        )
        .unwrap();
        let channels = parse_channels_conf(&path).unwrap();
        assert_eq!(channels, vec!["KCBS-HD".to_string(), "KQED".to_string()]);
        let _ = fs::remove_file(&path);
    }
    #[test]
    fn default_zap_command_contains_adapter_and_channel() {
        let conf = Path::new("/tmp/channels.conf");
        let cmd = default_zap_tune_command(2, conf, "KQED");
        assert_eq!(cmd[0], "dvbv5-zap");
        assert!(cmd.iter().any(|arg| arg == "2"));
        assert!(cmd.iter().any(|arg| arg == "KQED"));
    }
    // NOTE(review): this test mutates process-wide env state; cargo runs
    // tests in parallel by default, so it can race with any other test that
    // reads EVERY_CHANNEL_DVB_CHANNELS_CONF — consider serializing.
    #[test]
    fn find_channels_conf_prefers_env_override() {
        let dir = std::env::temp_dir().join(format!("ec-channels-env-{}", std::process::id()));
        let _ = fs::create_dir_all(&dir);
        let path = dir.join("channels.conf");
        fs::write(&path, "KQED:foo\n").unwrap();
        let prev = std::env::var("EVERY_CHANNEL_DVB_CHANNELS_CONF").ok();
        std::env::set_var(
            "EVERY_CHANNEL_DVB_CHANNELS_CONF",
            path.display().to_string(),
        );
        let found = find_channels_conf().unwrap();
        assert_eq!(found, path);
        // Restore the previous env value so later tests see a clean slate.
        match prev {
            Some(value) => std::env::set_var("EVERY_CHANNEL_DVB_CHANNELS_CONF", value),
            None => std::env::remove_var("EVERY_CHANNEL_DVB_CHANNELS_CONF"),
        }
        let _ = fs::remove_file(&path);
    }
    // Builds a fake /dev/dvb tree and checks adapters come back sorted with
    // their device indices parsed.
    #[test]
    fn list_adapters_parses_fake_dev_tree() {
        let root = std::env::temp_dir().join(format!("ec-dvb-root-{}", std::process::id()));
        let _ = fs::remove_dir_all(&root);
        fs::create_dir_all(root.join("adapter1")).unwrap();
        fs::create_dir_all(root.join("adapter0")).unwrap();
        fs::write(root.join("adapter0").join("dvr0"), "").unwrap();
        fs::write(root.join("adapter0").join("frontend0"), "").unwrap();
        fs::write(root.join("adapter1").join("dvr2"), "").unwrap();
        fs::write(root.join("adapter1").join("frontend0"), "").unwrap();
        fs::write(root.join("adapter1").join("frontend1"), "").unwrap();
        let list = list_adapters_in(&root).unwrap();
        assert_eq!(list.len(), 2);
        assert_eq!(list[0].adapter, 0);
        assert_eq!(list[0].dvrs, vec![0]);
        assert_eq!(list[0].frontends, vec![0]);
        assert_eq!(list[1].adapter, 1);
        assert_eq!(list[1].dvrs, vec![2]);
        assert_eq!(list[1].frontends, vec![0, 1]);
        let _ = fs::remove_dir_all(&root);
    }
}
/// Spawn an external tune command (e.g. dvbv5-zap) and return the child so
/// the caller can keep it alive for the stream's lifetime. An empty command
/// vector means "no tuner needed" and yields `Ok(None)`.
///
/// NOTE(review): when `tune_timeout_ms` is set, the current thread sleeps for
/// that long after spawning — a fixed settle delay (presumably to let the
/// tuner lock before the DVR node is opened), not a timeout on the child.
/// Confirm the parameter name matches the intent.
#[cfg(target_os = "linux")]
fn spawn_tune_command(command: Vec<String>, tune_timeout_ms: Option<u64>) -> Result<Option<Child>> {
    if command.is_empty() {
        return Ok(None);
    }
    let mut cmd = Command::new(&command[0]);
    if command.len() > 1 {
        cmd.args(&command[1..]);
    }
    let child = cmd.spawn()?;
    if let Some(timeout_ms) = tune_timeout_ms {
        std::thread::sleep(Duration::from_millis(timeout_ms));
    }
    Ok(Some(child))
}
// Non-Linux stub: never spawns anything and reports no tuner process.
#[cfg(not(target_os = "linux"))]
fn spawn_tune_command(
    _command: Vec<String>,
    _tune_timeout_ms: Option<u64>,
) -> Result<Option<Child>> {
    Ok(None)
}
/// Path to the DVR device node, e.g. `/dev/dvb/adapter0/dvr0`.
fn dvb_path(adapter: u32, dvr: u32) -> PathBuf {
    let mut path = PathBuf::from("/dev/dvb");
    path.push(format!("adapter{adapter}"));
    path.push(format!("dvr{dvr}"));
    path
}

23
crates/ec-moq/Cargo.toml Normal file
View file

@ -0,0 +1,23 @@
[package]
name = "ec-moq"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
bytes = "1"
ec-core = { path = "../ec-core" }
ec-iroh = { path = "../ec-iroh" }
iroh = "0.96"
iroh-moq = { path = "../../third_party/iroh-live/iroh-moq" }
moq-lite = "0.10.1"
serde.workspace = true
serde_json.workspace = true
tokio = { version = "1", features = ["sync", "rt", "macros"] }
tracing.workspace = true
[dev-dependencies]
blake3.workspace = true
ec-crypto = { path = "../ec-crypto" }
hex = "0.4"

832
crates/ec-moq/src/lib.rs Normal file
View file

@ -0,0 +1,832 @@
//! Media over QUIC (MoQ) scaffolding.
use anyhow::{anyhow, Context, Result};
use bytes::Bytes;
use ec_core::Manifest;
use ec_iroh::DiscoveryConfig;
use iroh::{protocol::Router, Endpoint, EndpointAddr, SecretKey};
use moq_lite::{BroadcastConsumer, BroadcastProducer, Group, Track};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
/// A fully-qualified MoQ track: namespace plus track name.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrackName {
    pub namespace: String,
    pub name: String,
}
/// Newtype for a group identifier within a track.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GroupId(pub u64);
/// Newtype for an object identifier within a group.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectId(pub u64);
/// Metadata carried alongside each object's payload (serialized as JSON in
/// `encode_object_frame`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectMeta {
    /// Creation time, milliseconds since the Unix epoch.
    pub created_unix_ms: u64,
    /// Content type of the payload.
    pub content_type: String,
    /// Payload size in bytes.
    pub size_bytes: u64,
    /// Optional per-chunk timing on the 27 MHz timeline.
    pub timing: Option<TimingMeta>,
    /// Present when the payload is encrypted.
    pub encryption: Option<EncryptionMeta>,
    /// Optional payload hash; algorithm named by `chunk_hash_alg`.
    pub chunk_hash: Option<String>,
    pub chunk_hash_alg: Option<String>,
    /// Optional inclusion proof (presumably a Merkle path against the
    /// manifest root — confirm with the manifest code); algorithm named by
    /// `chunk_proof_alg`.
    pub chunk_proof: Option<Vec<String>>,
    pub chunk_proof_alg: Option<String>,
    /// Manifest this chunk belongs to, when known.
    pub manifest_id: Option<String>,
}
/// An object's metadata together with its raw payload bytes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectPayload {
    pub meta: ObjectMeta,
    pub data: Vec<u8>,
}
/// Default track name for media chunk objects.
pub const DEFAULT_TRACK_NAME: &str = "chunks";
/// Default track name for manifest objects.
pub const DEFAULT_MANIFEST_TRACK_NAME: &str = "manifests";
/// Publishes objects onto MoQ tracks.
pub trait Publisher {
    fn publish_object(
        &self,
        track: &TrackName,
        group: GroupId,
        object: ObjectPayload,
    ) -> Result<()>;
}
/// Subscribes to MoQ tracks.
pub trait Subscriber {
    fn subscribe_track(&self, track: &TrackName) -> Result<()>;
}
/// Announces tracks and caches published objects (see `FileRelay` for a
/// filesystem-backed implementation).
pub trait Relay {
    fn announce_track(&self, track: &TrackName) -> Result<()>;
    fn cache_object(&self, track: &TrackName, group: GroupId, object: ObjectPayload) -> Result<()>;
}
/// Per-chunk timing on a 27 MHz timeline, with optional UTC anchoring.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimingMeta {
    /// Sequential index of the chunk.
    pub chunk_index: u64,
    /// Chunk start in 27 MHz ticks.
    pub chunk_start_27mhz: u64,
    /// Chunk duration in 27 MHz ticks.
    pub chunk_duration_27mhz: u64,
    /// UTC time of the chunk start (seconds since the Unix epoch), when known.
    pub utc_start_unix: Option<i64>,
    /// Free-form synchronization state label — TODO confirm the value set
    /// against the time-sync engine.
    pub sync_status: String,
}
/// Describes how an encrypted payload was sealed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptionMeta {
    /// Cipher/algorithm identifier.
    pub alg: String,
    /// Identifier of the key used.
    pub key_id: String,
    /// Nonce, hex-encoded.
    pub nonce_hex: String,
}
/// A relay that persists objects to the local filesystem, one directory per
/// object holding `data.bin` and `meta.json`.
#[derive(Debug, Clone)]
pub struct FileRelay {
    root: PathBuf,
}
impl FileRelay {
    /// Create a relay rooted at `root`; nothing is created on disk until the
    /// first write.
    pub fn new(root: impl Into<PathBuf>) -> Self {
        Self { root: root.into() }
    }
    /// Persist one object's payload and metadata under its track/group/object
    /// directory, creating intermediate directories as needed.
    pub fn write_object(
        &self,
        track: &TrackName,
        group: GroupId,
        object_id: ObjectId,
        object: &ObjectPayload,
    ) -> Result<()> {
        let base = self.object_dir(track, group, object_id);
        fs::create_dir_all(&base)
            .with_context(|| format!("failed to create {}", base.display()))?;
        let data_path = base.join("data.bin");
        fs::write(&data_path, &object.data)
            .with_context(|| format!("failed to write {}", data_path.display()))?;
        let meta_path = base.join("meta.json");
        fs::write(&meta_path, serde_json::to_vec_pretty(&object.meta)?)
            .with_context(|| format!("failed to write {}", meta_path.display()))?;
        Ok(())
    }
    /// Directory for one object: `<root>/<ns>/<name>/group-G/object-O`, with
    /// namespace and name sanitized to filesystem-safe characters.
    fn object_dir(&self, track: &TrackName, group: GroupId, object_id: ObjectId) -> PathBuf {
        let mut dir = self.root.join(sanitize_component(&track.namespace));
        dir.push(sanitize_component(&track.name));
        dir.push(format!("group-{}", group.0));
        dir.push(format!("object-{}", object_id.0));
        dir
    }
}
impl Relay for FileRelay {
    /// Announcements are a no-op for the filesystem relay.
    fn announce_track(&self, _track: &TrackName) -> Result<()> {
        Ok(())
    }
    // NOTE(review): always stores at object id 0, so multiple cached objects
    // in the same group overwrite each other — confirm this is intended.
    fn cache_object(&self, track: &TrackName, group: GroupId, object: ObjectPayload) -> Result<()> {
        self.write_object(track, group, ObjectId(0), &object)
    }
}
/// Normalize a track-name component to lowercase `[a-z0-9_-]`.
///
/// Uppercase ASCII is lowered; every other character outside the safe set is
/// replaced with `_`, so the result is always usable as a path component.
fn sanitize_component(value: &str) -> String {
    let mut out = String::with_capacity(value.len());
    for ch in value.chars() {
        if matches!(ch, 'a'..='z' | '0'..='9' | '-' | '_') {
            out.push(ch);
        } else if ch.is_ascii_uppercase() {
            out.push(ch.to_ascii_lowercase());
        } else {
            out.push('_');
        }
    }
    out
}
/// Serialize an object into its wire frame: 4-byte big-endian metadata
/// length, the JSON-encoded metadata, then the raw payload bytes.
///
/// Fails if the metadata serializes to more than `u32::MAX` bytes.
pub fn encode_object_frame(meta: &ObjectMeta, data: &[u8]) -> Result<Vec<u8>> {
    let header = serde_json::to_vec(meta)?;
    let header_len = u32::try_from(header.len()).map_err(|_| anyhow!("object meta too large"))?;
    let mut frame = Vec::with_capacity(4 + header.len() + data.len());
    frame.extend_from_slice(&header_len.to_be_bytes());
    frame.extend_from_slice(&header);
    frame.extend_from_slice(data);
    Ok(frame)
}
/// Parse a wire frame produced by [`encode_object_frame`].
///
/// Errors if the frame is shorter than the 4-byte length prefix or does not
/// contain the advertised number of metadata bytes.
pub fn decode_object_frame(bytes: &[u8]) -> Result<ObjectPayload> {
    if bytes.len() < 4 {
        return Err(anyhow!("object frame too short"));
    }
    let (len_bytes, rest) = bytes.split_at(4);
    let meta_len = u32::from_be_bytes(len_bytes.try_into().expect("split_at(4) yields 4 bytes")) as usize;
    if rest.len() < meta_len {
        return Err(anyhow!("object frame missing metadata bytes"));
    }
    let (meta_bytes, payload) = rest.split_at(meta_len);
    Ok(ObjectPayload {
        meta: serde_json::from_slice(meta_bytes)?,
        data: payload.to_vec(),
    })
}
/// Serialize a manifest to its JSON wire form.
pub fn encode_manifest_frame(manifest: &Manifest) -> Result<Vec<u8>> {
    serde_json::to_vec(manifest).map_err(Into::into)
}
/// Parse a manifest from its JSON wire form.
pub fn decode_manifest_frame(bytes: &[u8]) -> Result<Manifest> {
    serde_json::from_slice(bytes).map_err(Into::into)
}
/// An iroh endpoint with a MoQ protocol handler attached via a router.
#[derive(Debug)]
pub struct MoqNode {
    endpoint: Endpoint,
    router: Router,
    moq: iroh_moq::Moq,
}
impl MoqNode {
    /// Bind a node using discovery configuration read from the environment.
    pub async fn bind(secret: Option<SecretKey>) -> Result<Self> {
        let discovery = DiscoveryConfig::from_env()?;
        Self::bind_with_discovery(secret, discovery).await
    }
    /// Bind with an explicit discovery configuration: build the iroh
    /// endpoint, attach a MoQ handler, and spawn a router accepting the
    /// MoQ ALPN.
    pub async fn bind_with_discovery(
        secret: Option<SecretKey>,
        discovery: DiscoveryConfig,
    ) -> Result<Self> {
        let endpoint = ec_iroh::build_endpoint(secret, discovery).await?;
        let moq = iroh_moq::Moq::new(endpoint.clone());
        let router = Router::builder(endpoint.clone())
            .accept(iroh_moq::ALPN, moq.protocol_handler())
            .spawn();
        Ok(Self {
            endpoint,
            router,
            moq,
        })
    }
    /// The underlying iroh endpoint.
    pub fn endpoint(&self) -> &Endpoint {
        &self.endpoint
    }
    /// Dialable address of this node's endpoint.
    pub fn endpoint_addr(&self) -> EndpointAddr {
        self.router.endpoint().addr()
    }
    /// Publish a broadcast containing a single object track and return a
    /// handle for writing objects to it.
    pub async fn publish_objects(
        &self,
        broadcast_name: impl Into<String>,
        track_name: impl Into<String>,
    ) -> Result<MoqPublisher> {
        let broadcast_name = broadcast_name.into();
        let track_name = track_name.into();
        let mut broadcast = BroadcastProducer::default();
        let track = broadcast.create_track(Track {
            name: track_name.clone(),
            priority: 0,
        });
        self.moq
            .publish(broadcast_name.clone(), broadcast.clone())
            .await?;
        Ok(MoqPublisher {
            broadcast_name,
            track_name,
            broadcast,
            track,
        })
    }
    /// Publish a broadcast containing multiple tracks, all created before publishing.
    ///
    /// This avoids subtle issues in some MoQ implementations where tracks added after the
    /// initial publish are not reliably deliverable to subscribers.
    pub async fn publish_track_set(
        &self,
        broadcast_name: impl Into<String>,
        object_tracks: Vec<String>,
        manifest_tracks: Vec<String>,
    ) -> Result<MoqPublishSet> {
        let broadcast_name = broadcast_name.into();
        let mut broadcast = BroadcastProducer::default();
        // Create every object track up front, keyed by name.
        let mut object = HashMap::new();
        for name in object_tracks {
            let track = broadcast.create_track(Track {
                name: name.clone(),
                priority: 0,
            });
            object.insert(name, track);
        }
        // Same for manifest tracks.
        let mut manifests = HashMap::new();
        for name in manifest_tracks {
            let track = broadcast.create_track(Track {
                name: name.clone(),
                priority: 0,
            });
            manifests.insert(name, track);
        }
        self.moq.publish(broadcast_name.clone(), broadcast).await?;
        Ok(MoqPublishSet {
            broadcast_name,
            object,
            manifests,
        })
    }
    /// Connect to `remote`, subscribe to one object track of the named
    /// broadcast, and return a stream of decoded objects.
    pub async fn subscribe_objects(
        &self,
        remote: EndpointAddr,
        broadcast_name: impl Into<String>,
        track_name: impl Into<String>,
    ) -> Result<MoqObjectStream> {
        let broadcast_name = broadcast_name.into();
        let track_name = track_name.into();
        let mut session = self.moq.connect(remote).await?;
        let broadcast = session.subscribe(&broadcast_name).await?;
        let track = subscribe_track(&broadcast, &track_name)?;
        MoqObjectStream::spawn(session, track)
    }
    /// Connect to `remote`, subscribe to one manifest track of the named
    /// broadcast, and return a stream of decoded manifests.
    pub async fn subscribe_manifests(
        &self,
        remote: EndpointAddr,
        broadcast_name: impl Into<String>,
        track_name: impl Into<String>,
    ) -> Result<MoqManifestStream> {
        let broadcast_name = broadcast_name.into();
        let track_name = track_name.into();
        let mut session = self.moq.connect(remote).await?;
        let broadcast = session.subscribe(&broadcast_name).await?;
        let track = subscribe_track(&broadcast, &track_name)?;
        MoqManifestStream::spawn(session, track)
    }
}
/// Publisher handles for a broadcast whose tracks were all created before
/// the initial publish (see [`MoqNode::publish_track_set`]).
pub struct MoqPublishSet {
    broadcast_name: String,
    // Object track producers keyed by track name.
    object: HashMap<String, moq_lite::TrackProducer>,
    // Manifest track producers keyed by track name.
    manifests: HashMap<String, moq_lite::TrackProducer>,
}
impl MoqPublishSet {
    /// Write one object as a single-frame MoQ group on the named object
    /// track. Errors if the track is unknown or the group sequence was
    /// already used.
    pub fn publish_object(
        &mut self,
        track_name: &str,
        group: GroupId,
        object: ObjectPayload,
    ) -> Result<()> {
        let track = self
            .object
            .get_mut(track_name)
            .ok_or_else(|| anyhow!("unknown object track {}", track_name))?;
        let mut writer = track
            .create_group(Group { sequence: group.0 })
            .ok_or_else(|| anyhow!("group {} already published", group.0))?;
        writer.write_frame(Bytes::from(encode_object_frame(&object.meta, &object.data)?));
        writer.close();
        Ok(())
    }
    /// Write one manifest as a single-frame MoQ group on the named manifest
    /// track. Errors if the track is unknown or the sequence was already used.
    pub fn publish_manifest(
        &mut self,
        track_name: &str,
        sequence: u64,
        manifest: &Manifest,
    ) -> Result<()> {
        let track = self
            .manifests
            .get_mut(track_name)
            .ok_or_else(|| anyhow!("unknown manifest track {}", track_name))?;
        let mut writer = track
            .create_group(Group { sequence })
            .ok_or_else(|| anyhow!("manifest group {} already published", sequence))?;
        writer.write_frame(Bytes::from(encode_manifest_frame(manifest)?));
        writer.close();
        Ok(())
    }
    /// Name of the published broadcast.
    pub fn broadcast_name(&self) -> &str {
        &self.broadcast_name
    }
}
/// Handle for publishing objects on a single-track broadcast; further
/// tracks can be added via `create_side_track` / `create_manifest_track`.
pub struct MoqPublisher {
    broadcast_name: String,
    track_name: String,
    broadcast: BroadcastProducer,
    track: moq_lite::TrackProducer,
}
impl MoqPublisher {
    /// Write one object as a single-frame MoQ group on this publisher's
    /// primary track. Errors if the group sequence was already used.
    pub fn publish_object(&mut self, group: GroupId, object: ObjectPayload) -> Result<()> {
        let mut writer = self
            .track
            .create_group(Group { sequence: group.0 })
            .ok_or_else(|| anyhow!("group {} already published", group.0))?;
        writer.write_frame(Bytes::from(encode_object_frame(&object.meta, &object.data)?));
        writer.close();
        Ok(())
    }
    /// Add another object track to the broadcast and return its publisher.
    pub fn create_side_track(&mut self, track_name: impl Into<String>) -> Result<MoqSidePublisher> {
        let name = track_name.into();
        let producer = self.broadcast.create_track(Track {
            name: name.clone(),
            priority: 0,
        });
        Ok(MoqSidePublisher {
            track_name: name,
            track: producer,
        })
    }
    /// Add a manifest track to the broadcast and return its publisher.
    pub fn create_manifest_track(
        &mut self,
        track_name: impl Into<String>,
    ) -> Result<MoqManifestPublisher> {
        let name = track_name.into();
        let producer = self.broadcast.create_track(Track {
            name: name.clone(),
            priority: 0,
        });
        Ok(MoqManifestPublisher {
            track_name: name,
            track: producer,
        })
    }
    /// Name of the published broadcast.
    pub fn broadcast_name(&self) -> &str {
        &self.broadcast_name
    }
    /// Name of the primary object track.
    pub fn track_name(&self) -> &str {
        &self.track_name
    }
}
/// Publisher for an additional object track created after the broadcast.
pub struct MoqSidePublisher {
    track_name: String,
    track: moq_lite::TrackProducer,
}
impl MoqSidePublisher {
    /// Write one object as a single-frame MoQ group on this side track.
    /// Errors if the group sequence was already used.
    pub fn publish_object(&mut self, group: GroupId, object: ObjectPayload) -> Result<()> {
        let mut writer = self
            .track
            .create_group(Group { sequence: group.0 })
            .ok_or_else(|| anyhow!("group {} already published", group.0))?;
        writer.write_frame(Bytes::from(encode_object_frame(&object.meta, &object.data)?));
        writer.close();
        Ok(())
    }
    /// Name of this side track.
    pub fn track_name(&self) -> &str {
        &self.track_name
    }
}
/// Publisher for a manifest track created after the broadcast.
pub struct MoqManifestPublisher {
    track_name: String,
    track: moq_lite::TrackProducer,
}
impl MoqManifestPublisher {
    /// Write one manifest as a single-frame MoQ group on this track.
    /// Errors if the sequence was already used.
    pub fn publish_manifest(&mut self, sequence: u64, manifest: &Manifest) -> Result<()> {
        let mut writer = self
            .track
            .create_group(Group { sequence })
            .ok_or_else(|| anyhow!("manifest group {} already published", sequence))?;
        writer.write_frame(Bytes::from(encode_manifest_frame(manifest)?));
        writer.close();
        Ok(())
    }
    /// Name of this manifest track.
    pub fn track_name(&self) -> &str {
        &self.track_name
    }
}
/// Receiving side of an object track: a background task decodes incoming
/// groups into [`ObjectPayload`]s and feeds them through a channel.
pub struct MoqObjectStream {
    receiver: mpsc::Receiver<ObjectPayload>,
    // Held only to keep the reader task and session alive for the stream's lifetime.
    _task: JoinHandle<()>,
    _session: iroh_moq::MoqSession,
}
impl MoqObjectStream {
    /// Spawn a background task that drains `track`: each MoQ group's frames
    /// are concatenated into one buffer and decoded as a single object
    /// frame. The task exits on track error/end or when the receiver side
    /// is dropped.
    fn spawn(session: iroh_moq::MoqSession, mut track: moq_lite::TrackConsumer) -> Result<Self> {
        let (tx, rx) = mpsc::channel(32);
        let task = tokio::spawn(async move {
            loop {
                let next_group = track.next_group().await;
                let Some(mut group) = (match next_group {
                    Ok(group) => group,
                    Err(err) => {
                        tracing::warn!("moq track error: {err:#}");
                        break;
                    }
                }) else {
                    break;
                };
                // Collect every frame of this group into one buffer.
                let mut buffer = Vec::new();
                loop {
                    match group.read_frame().await {
                        Ok(Some(frame)) => buffer.extend_from_slice(&frame),
                        Ok(None) => break,
                        Err(err) => {
                            tracing::warn!("moq group error: {err:#}");
                            break;
                        }
                    }
                }
                // Empty groups carry nothing decodable; skip them.
                if buffer.is_empty() {
                    continue;
                }
                match decode_object_frame(&buffer) {
                    Ok(object) => {
                        // Send failure means the consumer is gone; stop reading.
                        if tx.send(object).await.is_err() {
                            break;
                        }
                    }
                    Err(err) => {
                        tracing::warn!("failed to decode object frame: {err:#}");
                    }
                }
            }
        });
        Ok(Self {
            receiver: rx,
            _task: task,
            _session: session,
        })
    }
    /// Next decoded object, or `None` once the reader task has finished.
    pub async fn recv(&mut self) -> Option<ObjectPayload> {
        self.receiver.recv().await
    }
}
/// Receiving side of a manifest track: a background task decodes incoming
/// groups into [`Manifest`]s and feeds them through a channel.
pub struct MoqManifestStream {
    receiver: mpsc::Receiver<Manifest>,
    // Held only to keep the reader task and session alive for the stream's lifetime.
    _task: JoinHandle<()>,
    _session: iroh_moq::MoqSession,
}
impl MoqManifestStream {
    /// Spawn a background task that drains `track`, mirroring
    /// [`MoqObjectStream::spawn`] but decoding manifest frames instead.
    fn spawn(session: iroh_moq::MoqSession, mut track: moq_lite::TrackConsumer) -> Result<Self> {
        let (tx, rx) = mpsc::channel(8);
        let task = tokio::spawn(async move {
            loop {
                let next_group = track.next_group().await;
                let Some(mut group) = (match next_group {
                    Ok(group) => group,
                    Err(err) => {
                        tracing::warn!("moq manifest track error: {err:#}");
                        break;
                    }
                }) else {
                    break;
                };
                // Collect every frame of this group into one buffer.
                let mut buffer = Vec::new();
                loop {
                    match group.read_frame().await {
                        Ok(Some(frame)) => buffer.extend_from_slice(&frame),
                        Ok(None) => break,
                        Err(err) => {
                            tracing::warn!("moq manifest group error: {err:#}");
                            break;
                        }
                    }
                }
                // Empty groups carry nothing decodable; skip them.
                if buffer.is_empty() {
                    continue;
                }
                match decode_manifest_frame(&buffer) {
                    Ok(manifest) => {
                        // Send failure means the consumer is gone; stop reading.
                        if tx.send(manifest).await.is_err() {
                            break;
                        }
                    }
                    Err(err) => {
                        tracing::warn!("failed to decode manifest frame: {err:#}");
                    }
                }
            }
        });
        Ok(Self {
            receiver: rx,
            _task: task,
            _session: session,
        })
    }
    /// Next decoded manifest, or `None` once the reader task has finished.
    pub async fn recv(&mut self) -> Option<Manifest> {
        self.receiver.recv().await
    }
}
/// Subscribe to the named track of a broadcast and return its consumer.
fn subscribe_track(broadcast: &BroadcastConsumer, name: &str) -> Result<moq_lite::TrackConsumer> {
    Ok(broadcast.subscribe_track(&Track::new(name)))
}
/// Writes a sliding-window CMAF/HLS artifact: `init.mp4`, `segment_*.m4s`
/// files, and an `index.m3u8` playlist regenerated after every write.
#[derive(Debug, Clone)]
pub struct HlsWriter {
    output_dir: PathBuf,
    // Maximum number of segments kept in the playlist (>= 1).
    window: usize,
    // Advertised EXT-X-TARGETDURATION, in seconds.
    target_duration: f64,
    init_filename: String,
    // Most recent segments, oldest first.
    segments: std::collections::VecDeque<HlsSegment>,
}
/// One playlist entry: segment index, duration in seconds, and filename.
#[derive(Debug, Clone)]
struct HlsSegment {
    index: u64,
    duration: f64,
    filename: String,
}
impl HlsWriter {
    /// Create a CMAF writer rooted at `output_dir` (created if missing).
    /// `window` is clamped to at least one segment.
    pub fn new_cmaf(
        output_dir: impl Into<PathBuf>,
        target_duration: f64,
        window: usize,
    ) -> Result<Self> {
        // CMAF-only writer: init.mp4 + segment_*.m4s + HLS playlist as a local compatibility artifact.
        let dir = output_dir.into();
        fs::create_dir_all(&dir).with_context(|| format!("failed to create {}", dir.display()))?;
        Ok(Self {
            output_dir: dir,
            window: window.max(1),
            target_duration,
            init_filename: "init.mp4".to_string(),
            segments: std::collections::VecDeque::new(),
        })
    }
    /// Write the init segment and refresh the playlist; returns its path.
    pub fn write_init_segment(&mut self, data: &[u8]) -> Result<PathBuf> {
        let path = self.output_dir.join(&self.init_filename);
        fs::write(&path, data).with_context(|| format!("failed to write {}", path.display()))?;
        self.write_playlist()?;
        Ok(path)
    }
    /// Write one media segment, slide the window, and refresh the playlist;
    /// returns the segment's path.
    pub fn write_segment(&mut self, index: u64, duration: f64, data: &[u8]) -> Result<PathBuf> {
        let filename = format!("segment_{index:06}.m4s");
        let path = self.output_dir.join(&filename);
        fs::write(&path, data).with_context(|| format!("failed to write {}", path.display()))?;
        self.segments.push_back(HlsSegment {
            index,
            duration,
            filename,
        });
        // Evict the oldest entries until the window size is respected.
        while self.segments.len() > self.window {
            self.segments.pop_front();
        }
        self.write_playlist()?;
        Ok(path)
    }
    // Regenerate index.m3u8 from the current window.
    fn write_playlist(&self) -> Result<()> {
        let mut playlist = String::new();
        playlist.push_str("#EXTM3U\n");
        playlist.push_str("#EXT-X-VERSION:7\n");
        playlist.push_str("#EXT-X-INDEPENDENT-SEGMENTS\n");
        playlist.push_str(&format!("#EXT-X-MAP:URI=\"{}\"\n", self.init_filename));
        // TARGETDURATION must be an integer >= every segment duration.
        let target = self.target_duration.ceil().max(1.0) as u64;
        playlist.push_str(&format!("#EXT-X-TARGETDURATION:{target}\n"));
        if let Some(first) = self.segments.front() {
            playlist.push_str(&format!("#EXT-X-MEDIA-SEQUENCE:{}\n", first.index));
        }
        for seg in &self.segments {
            playlist.push_str(&format!("#EXTINF:{:.3},\n", seg.duration));
            playlist.push_str(&seg.filename);
            playlist.push('\n');
        }
        let playlist_path = self.output_dir.join("index.m3u8");
        fs::write(&playlist_path, playlist)
            .with_context(|| format!("failed to write {}", playlist_path.display()))?;
        Ok(())
    }
}
/// Chunk duration in seconds derived from the 27 MHz timing metadata,
/// falling back to `fallback` when timing is absent or non-positive.
pub fn chunk_duration_secs(meta: &ObjectMeta, fallback: Duration) -> f64 {
    meta.timing
        .as_ref()
        .map(|timing| timing.chunk_duration_27mhz as f64 / 27_000_000.0)
        .filter(|&secs| secs > 0.0)
        .unwrap_or_else(|| fallback.as_secs_f64())
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::env;
    // Directory layouts depend on this mapping staying stable across releases.
    #[test]
    fn sanitize_component_is_stable() {
        assert_eq!(sanitize_component("Hello World!"), "hello_world_");
        assert_eq!(sanitize_component("a-b_C9"), "a-b_c9");
    }
    // Encoding then decoding an object frame must preserve metadata and payload.
    #[test]
    fn object_frame_roundtrip() {
        let meta = ObjectMeta {
            created_unix_ms: 1,
            content_type: "application/octet-stream".to_string(),
            size_bytes: 3,
            timing: Some(TimingMeta {
                chunk_index: 7,
                chunk_start_27mhz: 0,
                chunk_duration_27mhz: 54_000_000,
                utc_start_unix: None,
                sync_status: "synthetic".to_string(),
            }),
            encryption: None,
            chunk_hash: Some("00".repeat(32)),
            chunk_hash_alg: Some("blake3".to_string()),
            chunk_proof: Some(vec!["00".repeat(32)]),
            chunk_proof_alg: Some("merkle+blake3".to_string()),
            manifest_id: Some("m".to_string()),
        };
        let data = b"abc".to_vec();
        let frame = encode_object_frame(&meta, &data).unwrap();
        let decoded = decode_object_frame(&frame).unwrap();
        assert_eq!(decoded.data, data);
        assert_eq!(decoded.meta.created_unix_ms, meta.created_unix_ms);
        assert_eq!(
            decoded.meta.timing.as_ref().unwrap().chunk_index,
            meta.timing.as_ref().unwrap().chunk_index
        );
        assert_eq!(decoded.meta.manifest_id, meta.manifest_id);
    }
    // Frames shorter than the 4-byte length prefix must be rejected.
    #[test]
    fn decode_rejects_short_frame() {
        assert!(decode_object_frame(&[]).is_err());
        assert!(decode_object_frame(&[0, 0, 0]).is_err());
    }
    // Manifest JSON wire format must round-trip the body fields.
    #[test]
    fn manifest_frame_roundtrip() {
        let manifest = ec_core::Manifest {
            body: ec_core::ManifestBody {
                stream_id: ec_core::StreamId("s".to_string()),
                epoch_id: "e".to_string(),
                chunk_duration_ms: 2000,
                total_chunks: 1,
                chunk_start_index: 0,
                encoder_profile_id: "p".to_string(),
                merkle_root: "00".repeat(32),
                created_unix_ms: 1,
                metadata: Vec::new(),
                chunk_hashes: vec!["11".repeat(32)],
                variants: None,
            },
            manifest_id: "m".to_string(),
            signatures: Vec::new(),
        };
        let bytes = encode_manifest_frame(&manifest).unwrap();
        let decoded = decode_manifest_frame(&bytes).unwrap();
        assert_eq!(decoded.manifest_id, "m");
        assert_eq!(decoded.body.epoch_id, "e");
    }
    // Signing a manifest id, round-tripping the manifest, then verifying the
    // signature must succeed. Mutates a process-wide env var, so the prior
    // value is saved and restored at the end.
    #[test]
    fn manifest_frame_signed_roundtrip_verifies() {
        let prev = env::var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY").ok();
        env::set_var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", "11".repeat(32));
        let keypair = ec_crypto::load_manifest_keypair_from_env()
            .expect("load should not error")
            .expect("keypair should exist");
        let chunk_hashes = vec![blake3::hash(b"chunk0").to_hex().to_string()];
        let merkle_root = ec_core::merkle_root_from_hashes(&chunk_hashes).unwrap();
        let body = ec_core::ManifestBody {
            stream_id: ec_core::StreamId("s".to_string()),
            epoch_id: "e".to_string(),
            chunk_duration_ms: 2000,
            total_chunks: 1,
            chunk_start_index: 0,
            encoder_profile_id: "p".to_string(),
            merkle_root,
            created_unix_ms: 1,
            metadata: Vec::new(),
            chunk_hashes,
            variants: None,
        };
        let manifest_id = body.manifest_id().unwrap();
        let sig = ec_crypto::sign_manifest_id(&manifest_id, &keypair);
        assert!(ec_crypto::verify_manifest_signature(&manifest_id, &sig));
        let manifest = ec_core::Manifest {
            body,
            manifest_id: manifest_id.clone(),
            signatures: vec![sig],
        };
        let bytes = encode_manifest_frame(&manifest).unwrap();
        let decoded = decode_manifest_frame(&bytes).unwrap();
        assert_eq!(decoded.manifest_id, manifest_id);
        assert_eq!(decoded.signatures.len(), 1);
        assert!(ec_crypto::verify_manifest_signature(
            &decoded.manifest_id,
            &decoded.signatures[0]
        ));
        // Restore the previous env state so other tests are unaffected.
        match prev {
            Some(value) => env::set_var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", value),
            None => env::remove_var("EVERY_CHANNEL_MANIFEST_SIGNING_KEY"),
        }
    }
    // The chunk hash in metadata is over the *plaintext*; encryption wraps
    // only the payload, so decrypt-then-hash must match the recorded hash.
    #[test]
    fn object_frame_encrypt_decrypt_roundtrip_and_hash_matches_plaintext() {
        let stream_id = "ec/stream/v1/source/test/device-a/channel-b";
        let chunk_index = 7u64;
        let plaintext = b"hello every.channel";
        let expected_hash = blake3::hash(plaintext).to_hex().to_string();
        let enc = ec_crypto::encrypt_stream_data(stream_id, chunk_index, plaintext, None);
        let meta = ObjectMeta {
            created_unix_ms: 1,
            content_type: "application/octet-stream".to_string(),
            size_bytes: enc.ciphertext.len() as u64,
            timing: Some(TimingMeta {
                chunk_index,
                chunk_start_27mhz: 0,
                chunk_duration_27mhz: 54_000_000,
                utc_start_unix: None,
                sync_status: "synthetic".to_string(),
            }),
            encryption: Some(EncryptionMeta {
                alg: enc.alg.to_string(),
                key_id: stream_id.to_string(),
                nonce_hex: hex::encode(enc.nonce),
            }),
            chunk_hash: Some(expected_hash.clone()),
            chunk_hash_alg: Some("blake3".to_string()),
            chunk_proof: None,
            chunk_proof_alg: None,
            manifest_id: None,
        };
        let frame = encode_object_frame(&meta, &enc.ciphertext).unwrap();
        let decoded = decode_object_frame(&frame).unwrap();
        let out = ec_crypto::decrypt_stream_data(stream_id, chunk_index, &decoded.data, None)
            .expect("decrypt should succeed");
        assert_eq!(out, plaintext);
        assert_eq!(
            decoded.meta.chunk_hash.as_deref(),
            Some(expected_hash.as_str())
        );
        assert_eq!(
            blake3::hash(&out).to_hex().to_string(),
            decoded.meta.chunk_hash.unwrap()
        );
    }
}

35
crates/ec-node/Cargo.toml Normal file
View file

@ -0,0 +1,35 @@
[package]
name = "ec-node"
version = "0.0.0"
# Edition and license are inherited from the workspace manifest.
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
blake3.workspace = true
clap.workspace = true
# Sibling workspace crates.
ec-core = { path = "../ec-core" }
ec-crypto = { path = "../ec-crypto" }
ec-direct = { path = "../ec-direct" }
ec-moq = { path = "../ec-moq" }
ec-chopper = { path = "../ec-chopper" }
ec-hdhomerun = { path = "../ec-hdhomerun" }
ec-iroh = { path = "../ec-iroh" }
ec-linux-iptv = { path = "../ec-linux-iptv" }
hex = "0.4"
iroh = "0.96"
just-webrtc = "0.2"
bytes = "1"
# TLS via rustls only; default features disabled to avoid native-tls.
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
urlencoding = "2"
serde.workspace = true
serde_json.workspace = true
tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
tokio-tungstenite = { version = "0.24", default-features = false, features = ["connect", "rustls-tls-webpki-roots"] }
futures-util = "0.3"
tracing.workspace = true
tracing-subscriber.workspace = true
[dev-dependencies]
headless_chrome = "1"
which = "6"

4193
crates/ec-node/src/main.rs Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,283 @@
use anyhow::{anyhow, Result};
use clap::ValueEnum;
use ec_chopper::{deterministic_h264_profile, ffmpeg_profile_args};
use ec_core::SourceId;
use ec_hdhomerun::{find_lineup_entry_by_name, find_lineup_entry_by_number};
use ec_linux_iptv::LinuxDvbConfig;
use std::io::Read;
use std::process::{Child, Command, Stdio};
use std::thread;
/// A live MPEG-TS byte source plus an identifier describing where it came from.
pub trait StreamSource: Send {
    /// Open the raw TS byte stream for reading.
    fn open_stream(&self) -> Result<Box<dyn Read + Send>>;
    /// Stable identifier for this source (kind / device / channel).
    fn source_id(&self) -> SourceId;
}
/// HDHomeRun tuner source. Exactly one of `channel` / `name` selects the
/// lineup entry; `host` / `device_id` / mDNS select the device.
#[derive(Debug, Clone)]
pub struct HdhrSource {
    pub host: Option<String>,
    pub device_id: Option<String>,
    // Channel number (preferred) or name; number lookup falls back to name.
    pub channel: Option<String>,
    pub name: Option<String>,
    // Try the well-known "hdhomerun.local" mDNS alias before broadcast discovery.
    pub prefer_mdns: bool,
}
impl StreamSource for HdhrSource {
    /// Resolve a device, fetch its lineup, pick the entry matching
    /// `channel` (by number, then by name) or `name`, and open its stream.
    fn open_stream(&self) -> Result<Box<dyn Read + Send>> {
        let device = resolve_hdhr_device(self)?;
        let lineup = ec_hdhomerun::fetch_lineup(&device)?;
        let entry = match (&self.channel, &self.name) {
            (Some(channel), _) => find_lineup_entry_by_number(&lineup, channel)
                .or_else(|| find_lineup_entry_by_name(&lineup, channel))
                .ok_or_else(|| anyhow!("channel not found: {channel}"))?,
            (None, Some(name)) => find_lineup_entry_by_name(&lineup, name)
                .ok_or_else(|| anyhow!("channel not found: {name}"))?,
            (None, None) => return Err(anyhow!("--channel or --name required for hdhr")),
        };
        Ok(Box::new(ec_hdhomerun::open_stream_entry(entry, None)?))
    }
    /// Identify the source by device id (falling back to host) and the
    /// requested channel (falling back to name).
    fn source_id(&self) -> SourceId {
        SourceId {
            kind: "hdhr".to_string(),
            device_id: self.device_id.clone().or_else(|| self.host.clone()),
            channel: self.channel.clone().or_else(|| self.name.clone()),
        }
    }
}
/// Pick an HDHomeRun device for `source`: explicit host first, then an
/// mDNS name derived from the device id, then (optionally) the well-known
/// "hdhomerun.local" alias, and finally broadcast discovery.
fn resolve_hdhr_device(source: &HdhrSource) -> Result<ec_hdhomerun::HdhomerunDevice> {
    if let Some(host) = source.host.as_deref() {
        return ec_hdhomerun::discover_from_host(host);
    }
    if let Some(device_id) = source.device_id.as_deref() {
        return ec_hdhomerun::discover_from_host(&format!("{device_id}.local"));
    }
    if source.prefer_mdns {
        // Best-effort shortcut; fall through to discovery on failure.
        if let Ok(device) = ec_hdhomerun::discover_from_host("hdhomerun.local") {
            return Ok(device);
        }
    }
    ec_hdhomerun::discover()?
        .pop()
        .ok_or_else(|| anyhow!("no HDHomeRun devices found"))
}
/// Linux DVB adapter source read from `/dev/dvb/adapterN/dvrM`.
#[derive(Debug, Clone)]
pub struct LinuxDvbSource {
    pub adapter: u32,
    pub dvr: u32,
    // Optional external tuning command (argv); empty means "do not tune".
    pub tune_cmd: Vec<String>,
    pub tune_wait_ms: Option<u64>,
}
impl StreamSource for LinuxDvbSource {
    /// Build a DVB config (frontend 0) and open the DVR byte stream; an
    /// empty `tune_cmd` disables external tuning.
    fn open_stream(&self) -> Result<Box<dyn Read + Send>> {
        let tune_command = (!self.tune_cmd.is_empty()).then(|| self.tune_cmd.clone());
        let config = LinuxDvbConfig {
            adapter: self.adapter,
            frontend: 0,
            dvr: self.dvr,
            tune_command,
            tune_timeout_ms: self.tune_wait_ms,
        };
        Ok(Box::new(ec_linux_iptv::open_stream(&config)?))
    }
    /// Identify the source by adapter and DVR device numbers.
    fn source_id(&self) -> SourceId {
        SourceId {
            kind: "linux-dvb".to_string(),
            device_id: Some(format!("adapter{}:dvr{}", self.adapter, self.dvr)),
            channel: None,
        }
    }
}
/// MPEG-TS source read from a local file path or an HTTP(S) URL.
#[derive(Debug, Clone)]
pub struct TsSource {
    pub input: String,
}
impl StreamSource for TsSource {
    /// Open `input` as an HTTP(S) stream when it looks like a URL,
    /// otherwise as a local file.
    fn open_stream(&self) -> Result<Box<dyn Read + Send>> {
        let looks_like_url = ["http://", "https://"]
            .iter()
            .any(|prefix| self.input.starts_with(prefix));
        if looks_like_url {
            Ok(Box::new(ec_hdhomerun::open_stream_url(&self.input, None)?))
        } else {
            Ok(Box::new(std::fs::File::open(&self.input)?))
        }
    }
    /// TS inputs carry no device or channel identity.
    fn source_id(&self) -> SourceId {
        SourceId {
            kind: "ts".to_string(),
            device_id: None,
            channel: None,
        }
    }
}
/// How an HLS input is treated before chunking.
#[derive(Debug, Clone, Copy, ValueEnum)]
pub enum HlsMode {
    /// Copy the elementary streams untouched.
    Passthrough,
    /// Copy streams but regenerate PTS (`-fflags +genpts`).
    Remux,
    /// Re-encode with the deterministic H.264 profile.
    Transcode,
}
impl Default for HlsMode {
fn default() -> Self {
HlsMode::Passthrough
}
}
/// HLS source pulled via ffmpeg and re-emitted as MPEG-TS.
#[derive(Debug, Clone)]
pub struct HlsSource {
    pub url: String,
    pub mode: HlsMode,
}
impl StreamSource for HlsSource {
    /// Spawn ffmpeg to pull the HLS URL and pipe it out as MPEG-TS on
    /// stdout; the codec flags depend on [`HlsMode`].
    fn open_stream(&self) -> Result<Box<dyn Read + Send>> {
        let mut cmd = Command::new("ffmpeg");
        cmd.args(["-hide_banner", "-loglevel", "error", "-nostdin", "-i"])
            .arg(&self.url);
        match self.mode {
            HlsMode::Passthrough => {
                cmd.args(["-c", "copy"]);
            }
            HlsMode::Remux => {
                // Regenerate PTS while still copying the streams.
                cmd.args(["-fflags", "+genpts", "-c", "copy"]);
            }
            HlsMode::Transcode => {
                cmd.args(ffmpeg_profile_args(&deterministic_h264_profile()));
            }
        }
        cmd.args(["-f", "mpegts", "pipe:1"])
            .stdout(Stdio::piped())
            .stderr(Stdio::inherit());
        let mut child = cmd
            .spawn()
            .map_err(|err| anyhow!("failed to spawn ffmpeg: {err}"))?;
        let stdout = child
            .stdout
            .take()
            .ok_or_else(|| anyhow!("ffmpeg stdout unavailable"))?;
        Ok(Box::new(FfmpegChildStream { child, stdout }))
    }
    /// Identify the source by its URL.
    fn source_id(&self) -> SourceId {
        SourceId {
            kind: "hls".to_string(),
            device_id: None,
            channel: Some(self.url.clone()),
        }
    }
}
/// Wraps a spawned ffmpeg child; reads pull from its stdout pipe and the
/// process is terminated on drop.
struct FfmpegChildStream {
    child: Child,
    stdout: std::process::ChildStdout,
}
impl Read for FfmpegChildStream {
    /// Delegates directly to the child's stdout pipe.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.stdout.read(buf)
    }
}
impl Drop for FfmpegChildStream {
    /// Best-effort teardown: terminate ffmpeg and reap it.
    ///
    /// `kill` alone leaves the child as a zombie until this process exits;
    /// `wait` collects its exit status and releases the PID. Errors are
    /// ignored because the child may already have exited.
    fn drop(&mut self) {
        let _ = self.child.kill();
        // Reap the killed child so it does not linger as a zombie.
        let _ = self.child.wait();
    }
}
/// Pipe `reader` through an ffmpeg transcode using the deterministic H.264
/// profile and return the resulting MPEG-TS byte stream.
///
/// A background thread copies `reader` into ffmpeg's stdin; ffmpeg's stdout
/// becomes the returned reader. The thread handle is kept so drop can join it.
pub fn deterministic_transcode_stream(
    reader: Box<dyn Read + Send>,
) -> Result<Box<dyn Read + Send>> {
    let profile = deterministic_h264_profile();
    let mut cmd = Command::new("ffmpeg");
    cmd.arg("-hide_banner")
        .arg("-loglevel")
        .arg("error")
        .arg("-nostdin")
        .arg("-i")
        .arg("pipe:0");
    for arg in ffmpeg_profile_args(&profile) {
        cmd.arg(arg);
    }
    cmd.arg("-f")
        .arg("mpegts")
        .arg("pipe:1")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit());
    let mut child = cmd
        .spawn()
        .map_err(|err| anyhow!("failed to spawn ffmpeg: {err}"))?;
    let mut stdin = child
        .stdin
        .take()
        .ok_or_else(|| anyhow!("ffmpeg stdin unavailable"))?;
    let stdout = child
        .stdout
        .take()
        .ok_or_else(|| anyhow!("ffmpeg stdout unavailable"))?;
    // Moving `stdin` into the thread means it is closed when the copy ends,
    // which signals EOF to ffmpeg; copy errors are deliberately ignored.
    let writer = thread::spawn(move || {
        let mut reader = reader;
        let _ = std::io::copy(&mut reader, &mut stdin);
    });
    Ok(Box::new(FfmpegTranscodeStream {
        child,
        stdout,
        writer: Some(writer),
    }))
}
/// ffmpeg transcode pipeline handle: the child process, its stdout, and the
/// thread feeding its stdin (joined on drop).
struct FfmpegTranscodeStream {
    child: Child,
    stdout: std::process::ChildStdout,
    writer: Option<thread::JoinHandle<()>>,
}
impl Read for FfmpegTranscodeStream {
    /// Delegates directly to the child's stdout pipe.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.stdout.read(buf)
    }
}
impl Drop for FfmpegTranscodeStream {
    /// Best-effort teardown: terminate ffmpeg, reap it, then join the
    /// stdin-feeding thread (which unblocks once the child's pipe breaks).
    ///
    /// Without the `wait`, the killed child stays a zombie until this
    /// process exits. Errors are ignored: the child may already be gone.
    fn drop(&mut self) {
        let _ = self.child.kill();
        // Reap the killed child so it does not linger as a zombie.
        let _ = self.child.wait();
        if let Some(writer) = self.writer.take() {
            let _ = writer.join();
        }
    }
}

View file

@ -0,0 +1,308 @@
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
/// Locate the `ec-node` binary for integration tests.
///
/// Checks the explicit `EC_NODE_BIN` override, then Cargo's test-provided
/// `CARGO_BIN_EXE_*` variables, and finally falls back to `target/debug/ec-node`
/// relative to the test executable (which lives in `target/debug/deps`).
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    let exe = std::env::current_exe().expect("current_exe");
    exe.parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps")
        .join("ec-node")
}
/// Scan `lines` until a line starting with `prefix` appears, returning the
/// trimmed remainder. Gives up when the iterator ends or the wall-clock
/// deadline passes; read errors are skipped.
fn wait_for_line_prefix(
    lines: &mut dyn Iterator<Item = std::io::Result<String>>,
    prefix: &str,
    timeout: Duration,
) -> Option<String> {
    let deadline = Instant::now() + timeout;
    while Instant::now() < deadline {
        let line = match lines.next() {
            Some(Ok(line)) => line,
            Some(Err(_)) => continue,
            None => return None,
        };
        if let Some(rest) = line.strip_prefix(prefix) {
            return Some(rest.trim().to_string());
        }
    }
    None
}
/// BLAKE3 hash of a whole file, hex-encoded. Reads the file into memory;
/// the fixtures used here are small.
fn blake3_hex(path: &Path) -> anyhow::Result<String> {
    Ok(blake3::hash(&std::fs::read(path)?).to_hex().to_string())
}
/// Stitch the CMAF init segment and one media segment into a single file so
/// ffprobe can decode the fragment on its own.
fn concat_init_and_segment(init: &Path, seg: &Path, out: &Path) -> anyhow::Result<()> {
    let mut bytes = std::fs::read(init)?;
    bytes.extend(std::fs::read(seg)?);
    std::fs::write(out, bytes)?;
    Ok(())
}
/// Return ffprobe's `key_frame` flag (1 = keyframe) for the first decoded
/// video frame of `mp4`. If ffprobe is not installed, returns 1 so callers
/// effectively skip the check.
fn first_video_frame_keyframe_flag(mp4: &Path) -> anyhow::Result<u32> {
    // Probe for ffprobe availability first.
    if Command::new("ffprobe")
        .arg("-version")
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .is_err()
    {
        // Cross-OS environments might not have ffprobe installed; treat as skip.
        return Ok(1);
    }
    // Read only the first decoded frame record. For fMP4 this works reliably if we concat init+seg.
    let out = Command::new("ffprobe")
        .arg("-v")
        .arg("error")
        .arg("-select_streams")
        .arg("v:0")
        .arg("-show_frames")
        .arg("-read_intervals")
        .arg("%+#1")
        .arg("-show_entries")
        .arg("frame=key_frame")
        .arg("-of")
        .arg("csv=p=0")
        .arg(mp4)
        .output()?;
    if !out.status.success() {
        anyhow::bail!("ffprobe failed: {}", String::from_utf8_lossy(&out.stderr));
    }
    let s = String::from_utf8_lossy(&out.stdout);
    let first = s.lines().next().unwrap_or("").trim();
    // Some ffprobe builds may append extra columns (e.g. side data) even with restricted
    // `-show_entries`. We only care about the first token.
    let token = first.split(',').next().unwrap_or("").trim();
    let flag: u32 = token
        .parse()
        .map_err(|_| anyhow::anyhow!("unexpected ffprobe output: {first:?}"))?;
    Ok(flag)
}
/// Generate a reproducible 10-second MPEG-TS fixture at `out_path`.
fn write_deterministic_ts(out_path: &Path) -> anyhow::Result<()> {
    // Deterministic synthetic A/V source: 30fps CFR with a fixed sine audio tone.
    // Output: MPEG-TS, constrained to a stable keyframe cadence (g=60 -> 2s GOP).
    let status = Command::new("ffmpeg")
        .args(["-hide_banner", "-loglevel", "error", "-nostdin", "-y"])
        // Video input: synthetic 1280x720 test pattern at 30 fps.
        .args(["-f", "lavfi", "-i", "testsrc2=size=1280x720:rate=30"])
        // Audio input: fixed 1 kHz sine at 48 kHz.
        .args(["-f", "lavfi", "-i", "sine=frequency=1000:sample_rate=48000"])
        .args(["-t", "10", "-map", "0:v:0", "-map", "1:a:0"])
        // Video encode: x264 with a fixed 2s GOP, no scene-cut keyframes,
        // no B-frames, single-threaded and bitexact for reproducibility.
        .args(["-c:v", "libx264", "-pix_fmt", "yuv420p"])
        .args(["-g", "60", "-keyint_min", "60", "-sc_threshold", "0", "-bf", "0"])
        .args(["-threads", "1", "-fflags", "+bitexact", "-flags:v", "+bitexact"])
        // Audio encode: stereo AAC at 128k, bitexact.
        .args(["-c:a", "aac", "-b:a", "128k", "-ac", "2", "-ar", "48000"])
        .args(["-flags:a", "+bitexact"])
        .args(["-f", "mpegts"])
        .arg(out_path)
        .status()?;
    if !status.success() {
        anyhow::bail!("ffmpeg synthetic TS generation failed with {status}");
    }
    Ok(())
}
/// Run `ec-node moq-publish` over `input_ts` with the hd3 CMAF ladder,
/// writing chunks under `out_dir`. Uses fixed signing/network keys so the
/// outputs are comparable across runs; blocks until the publisher exits.
fn run_ladder(ec_node: &Path, input_ts: &Path, out_dir: &Path) -> anyhow::Result<()> {
    let signing_key = "11".repeat(32);
    let network_secret = "22".repeat(32);
    let stream_id = "every.channel/determinism/cmaf-ladder";
    let broadcast_name = "every.channel/determinism/cmaf-ladder";
    let mut cmd = Command::new(ec_node);
    cmd.env("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", &signing_key)
        .arg("moq-publish")
        .arg("--publish-manifests")
        .arg("--encode")
        .arg("cmaf")
        .arg("--cmaf-ladder")
        .arg("hd3")
        .arg("--epoch-chunks")
        .arg("1")
        .arg("--max-chunks")
        .arg("3")
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--stream-id")
        .arg(stream_id)
        .arg("--broadcast-name")
        .arg(broadcast_name)
        .arg("--track-name")
        .arg("chunks")
        .arg("--init-track")
        .arg("init")
        .arg("--network-secret")
        .arg(&network_secret)
        .arg("--chunk-dir")
        .arg(out_dir)
        .arg("--startup-delay-ms")
        .arg("0")
        .arg("ts")
        .arg(input_ts)
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit());
    // This will run until --max-chunks is reached, then exit.
    let mut child = cmd.spawn()?;
    let stdout = child.stdout.take().expect("publisher stdout missing");
    let mut lines = BufReader::new(stdout).lines();
    // Wait for the endpoint banner to confirm the node actually started.
    let _remote = wait_for_line_prefix(&mut lines, "moq endpoint addr: ", Duration::from_secs(10))
        .ok_or_else(|| anyhow::anyhow!("publisher did not print endpoint addr"))?;
    let status = child.wait()?;
    if !status.success() {
        anyhow::bail!("publisher failed: {status}");
    }
    Ok(())
}
// Two independent ladder runs over the same deterministic input must produce
// byte-identical init and media segments for every variant. Ignored by
// default: requires the ec-node binary and ffmpeg on PATH.
#[test]
#[ignore]
fn deterministic_cmaf_ladder_outputs_match_across_runs() {
    let ec_node = ec_node_path();
    // Timestamped temp dir avoids collisions between test invocations.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let tmp = std::env::temp_dir().join(format!("ec-determinism-cmaf-ladder-{ts}"));
    let _ = std::fs::create_dir_all(&tmp);
    let input_ts = tmp.join("input.ts");
    write_deterministic_ts(&input_ts).expect("write deterministic TS");
    let run1 = tmp.join("run1");
    let run2 = tmp.join("run2");
    let _ = std::fs::remove_dir_all(&run1);
    let _ = std::fs::remove_dir_all(&run2);
    std::fs::create_dir_all(&run1).unwrap();
    std::fs::create_dir_all(&run2).unwrap();
    run_ladder(&ec_node, &input_ts, &run1).expect("run ladder 1");
    run_ladder(&ec_node, &input_ts, &run2).expect("run ladder 2");
    // Compare hashes of init + three segments for each ladder variant.
    for variant in ["1080p", "720p", "480p"] {
        let v1 = run1.join("cmaf-ladder").join(variant);
        let v2 = run2.join("cmaf-ladder").join(variant);
        let init1 = v1.join("init.mp4");
        let init2 = v2.join("init.mp4");
        assert!(
            init1.exists() && init2.exists(),
            "missing init for {variant}"
        );
        assert_eq!(
            blake3_hex(&init1).unwrap(),
            blake3_hex(&init2).unwrap(),
            "init differs for {variant}"
        );
        for idx in 0..3 {
            let s1 = v1.join(format!("segment_{idx:06}.m4s"));
            let s2 = v2.join(format!("segment_{idx:06}.m4s"));
            assert!(
                s1.exists() && s2.exists(),
                "missing segment {idx} for {variant}"
            );
            assert_eq!(
                blake3_hex(&s1).unwrap(),
                blake3_hex(&s2).unwrap(),
                "segment {idx} differs for {variant}"
            );
        }
    }
}
// Every CMAF segment of every variant must start with a keyframe: stitch
// init + segment and ask ffprobe about the first decoded frame. Ignored by
// default: requires the ec-node binary and ffmpeg/ffprobe on PATH.
#[test]
#[ignore]
fn cmaf_ladder_segments_start_with_keyframes() {
    let ec_node = ec_node_path();
    // Timestamped temp dir avoids collisions between test invocations.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let tmp = std::env::temp_dir().join(format!("ec-determinism-cmaf-ladder-kf-{ts}"));
    let _ = std::fs::create_dir_all(&tmp);
    let input_ts = tmp.join("input.ts");
    write_deterministic_ts(&input_ts).expect("write deterministic TS");
    let run = tmp.join("run");
    let _ = std::fs::remove_dir_all(&run);
    std::fs::create_dir_all(&run).unwrap();
    run_ladder(&ec_node, &input_ts, &run).expect("run ladder");
    for variant in ["1080p", "720p", "480p"] {
        let v = run.join("cmaf-ladder").join(variant);
        let init = v.join("init.mp4");
        assert!(init.exists(), "missing init for {variant}");
        for idx in 0..3 {
            let seg = v.join(format!("segment_{idx:06}.m4s"));
            assert!(seg.exists(), "missing segment {idx} for {variant}");
            let stitched = tmp.join(format!("stitched-{variant}-{idx:06}.mp4"));
            concat_init_and_segment(&init, &seg, &stitched).unwrap();
            let keyflag = first_video_frame_keyframe_flag(&stitched).unwrap();
            assert_eq!(
                keyflag, 1,
                "segment {idx} not keyframe-aligned for {variant}"
            );
        }
    }
}

View file

@ -0,0 +1,231 @@
use std::io::{BufRead, BufReader, Read};
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
const TS_PACKET_SIZE: usize = 188;
/// Read an environment variable, returning `None` when it is unset or
/// contains only whitespace; otherwise the trimmed value.
fn env_required(key: &str) -> Option<String> {
    match std::env::var(key) {
        Ok(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        }
        Err(_) => None,
    }
}
/// Locate the `ec-node` binary: an explicit `EC_NODE_BIN` override wins,
/// then cargo-provided bin paths, then the target/debug directory derived
/// from the current test executable's location.
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    let exe = std::env::current_exe().expect("current_exe");
    exe.parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps")
        .join("ec-node")
}
/// Drain `lines` until one starts with `prefix`, returning the trimmed
/// remainder after the prefix. Gives up when `timeout` elapses or the
/// iterator is exhausted; individual read errors are skipped.
fn wait_for_line_prefix(
    lines: &mut dyn Iterator<Item = std::io::Result<String>>,
    prefix: &str,
    timeout: Duration,
) -> Option<String> {
    let give_up_at = Instant::now() + timeout;
    loop {
        if Instant::now() >= give_up_at {
            return None;
        }
        // `?` bails with None when the stream of lines ends.
        let item = lines.next()?;
        if let Ok(line) = item {
            if let Some(rest) = line.strip_prefix(prefix) {
                return Some(rest.trim().to_string());
            }
        }
    }
}
/// Capture a short MPEG-TS sample from an HDHomeRun at `host` for `channel`
/// and write it to `out_path`.
///
/// The channel is resolved through the device lineup (guide number first,
/// then name), but the capture always goes through the provided host so
/// mDNS hostnames embedded in lineup URLs are never required. Tuner
/// allocation can transiently fail with 5xx responses, so opening the
/// stream is retried with a growing backoff. The recording is trimmed to a
/// whole number of 188-byte TS packets and must hold at least 200 packets.
fn write_short_ts_recording(
    host: &str,
    channel: &str,
    out_path: &std::path::Path,
) -> anyhow::Result<()> {
    // Use lineup to resolve name -> number, but capture from the provided host.
    // (OrbStack/Linux may not resolve the lineup URL's mDNS hostname.)
    let device = ec_hdhomerun::discover_from_host(host)?;
    let lineup = ec_hdhomerun::fetch_lineup(&device)?;
    let entry = ec_hdhomerun::find_lineup_entry_by_number(&lineup, channel)
        .or_else(|| ec_hdhomerun::find_lineup_entry_by_name(&lineup, channel))
        .ok_or_else(|| anyhow::anyhow!("channel not found in lineup: {channel}"))?;
    let guide_number = entry.channel.number.as_deref().unwrap_or(channel);
    let capture_url = format!("http://{host}:5004/auto/v{guide_number}");
    // Capture a short TS sample directly from the HDHR.
    // Retry a few times to handle "no tuner available" 5xx responses.
    let mut last_err: Option<anyhow::Error> = None;
    for attempt in 0..10 {
        match ec_hdhomerun::open_stream_url(&capture_url, Some(14)) {
            Ok(mut stream) => {
                let mut file = std::fs::File::create(out_path)?;
                std::io::copy(&mut stream, &mut file)?;
                last_err = None;
                break;
            }
            Err(err) => {
                last_err = Some(err);
                // Linear backoff: 400 ms, 800 ms, ...
                std::thread::sleep(Duration::from_millis(400 * (attempt + 1) as u64));
                continue;
            }
        }
    }
    if let Some(err) = last_err {
        return Err(err);
    }
    // Trim the capture to a packet boundary by truncating in place instead
    // of reading the whole file back into memory and rewriting it.
    let mut len = std::fs::metadata(out_path)?.len() as usize;
    let rem = len % TS_PACKET_SIZE;
    if rem != 0 {
        len -= rem;
        let file = std::fs::OpenOptions::new().write(true).open(out_path)?;
        file.set_len(len as u64)?;
    }
    // Use the shared constant instead of the magic number 188.
    if len < TS_PACKET_SIZE * 200 {
        anyhow::bail!("recorded TS too small ({} bytes) from HDHR {}", len, host);
    }
    Ok(())
}
// E2E: one publisher encodes a CMAF "hd3" ladder from a short HDHomeRun
// recording; three subscribers (one per rung) each verify signed manifests
// and must land init.mp4 plus the first media segment on disk.
// Requires EVERY_CHANNEL_E2E_HDHR_HOST / EVERY_CHANNEL_E2E_HDHR_CHANNEL;
// silently skips when either is unset.
#[test]
#[ignore]
fn e2e_cmaf_ladder_one_publisher_three_subscribers_verify_manifests() {
    let host = match env_required("EVERY_CHANNEL_E2E_HDHR_HOST") {
        Some(v) => v,
        None => return, // skip
    };
    let channel = match env_required("EVERY_CHANNEL_E2E_HDHR_CHANNEL") {
        Some(v) => v,
        None => return, // skip
    };
    let ec_node = ec_node_path();
    // Keep secrets deterministic for reproducibility.
    let signing_key = "11".repeat(32);
    let network_secret = "22".repeat(32);
    // Millisecond timestamp namespaces the stream id and temp dir so repeated
    // or concurrent runs do not collide.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let stream_id = format!("every.channel/e2e/cmaf-ladder/{ts}");
    let broadcast_name = stream_id.clone();
    let tmp = std::env::temp_dir().join(format!("ec-e2e-cmaf-ladder-{ts}"));
    let _ = std::fs::create_dir_all(&tmp);
    let input_ts = tmp.join("input.ts");
    write_short_ts_recording(&host, &channel, &input_ts).expect("failed to record TS from HDHR");
    // Publisher: CMAF ladder, 2s chunks, 3 chunks max. --startup-delay-ms
    // gives the subscribers time to connect before ingest begins.
    let mut publisher = Command::new(&ec_node);
    publisher
        .env("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", &signing_key)
        .arg("moq-publish")
        .arg("--publish-manifests")
        .arg("--encode")
        .arg("cmaf")
        .arg("--cmaf-ladder")
        .arg("hd3")
        .arg("--epoch-chunks")
        .arg("1")
        .arg("--max-chunks")
        .arg("3")
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--stream-id")
        .arg(&stream_id)
        .arg("--broadcast-name")
        .arg(&broadcast_name)
        .arg("--track-name")
        .arg("chunks")
        .arg("--init-track")
        .arg("init")
        .arg("--manifest-track")
        .arg("manifests")
        .arg("--network-secret")
        .arg(&network_secret)
        .arg("--chunk-dir")
        .arg(tmp.join("pub-chunks"))
        .arg("--startup-delay-ms")
        .arg("4000")
        .arg("ts")
        .arg(input_ts.to_string_lossy().to_string())
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit());
    let mut pub_child = publisher.spawn().expect("spawn publisher");
    let pub_stdout = pub_child.stdout.take().expect("publisher stdout missing");
    let mut pub_lines = BufReader::new(pub_stdout).lines();
    // The publisher prints its QUIC endpoint address on stdout; subscribers
    // dial that address directly.
    let remote = wait_for_line_prefix(
        &mut pub_lines,
        "moq endpoint addr: ",
        Duration::from_secs(10),
    )
    .expect("publisher did not print endpoint addr");
    // One subscriber per ladder rung, each on its variant-suffixed chunk and
    // init tracks, stopping after two segments.
    let variants = ["1080p", "720p", "480p"];
    let mut subscribers = Vec::new();
    for variant in variants {
        let out_dir = tmp.join(format!("sub-{variant}"));
        let mut sub = Command::new(&ec_node);
        sub.arg("moq-subscribe")
            .arg("--remote")
            .arg(&remote)
            .arg("--remote-manifests")
            .arg(&remote)
            .arg("--broadcast-name")
            .arg(&broadcast_name)
            .arg("--track-name")
            .arg(format!("chunks/{variant}"))
            .arg("--subscribe-manifests")
            .arg("--require-manifest")
            .arg("--manifest-track")
            .arg("manifests")
            .arg("--container")
            .arg("cmaf")
            .arg("--subscribe-init")
            .arg("--init-track")
            .arg(format!("init/{variant}"))
            .arg("--raw-cmaf")
            .arg("--stop-after")
            .arg("2")
            .arg("--network-secret")
            .arg(&network_secret)
            .arg("--output-dir")
            .arg(&out_dir)
            .stdout(Stdio::inherit())
            .stderr(Stdio::inherit());
        subscribers.push((
            variant.to_string(),
            out_dir,
            sub.spawn().expect("spawn subscriber"),
        ));
    }
    // Wait for every subscriber to finish, then require the init segment and
    // the first media segment in its output directory.
    for (variant, out_dir, mut child) in subscribers {
        let status = child.wait().expect("wait subscriber");
        assert!(status.success(), "subscriber {variant} failed: {status}");
        let init = out_dir.join("init.mp4");
        assert!(init.exists(), "subscriber {variant} missing init.mp4");
        let seg0 = out_dir.join("segment_000000.m4s");
        assert!(seg0.exists(), "subscriber {variant} missing first segment");
    }
    // Publisher exits on --max-chunks; kill is best-effort cleanup in case
    // it is still running.
    let _ = pub_child.kill();
}

View file

@ -0,0 +1,211 @@
use std::io::{BufRead, BufReader};
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
/// Read an environment variable, returning `None` when it is unset or
/// contains only whitespace; otherwise the trimmed value.
fn env_required(key: &str) -> Option<String> {
    match std::env::var(key) {
        Ok(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        }
        Err(_) => None,
    }
}
/// Heuristic: does this lineup tag/field text suggest a DRM-protected
/// channel? Case-insensitive substring match on a few marker words.
fn looks_drm(value: &str) -> bool {
    const MARKERS: [&str; 5] = ["drm", "encrypted", "protected", "copy", "widevine"];
    let lowered = value.to_lowercase();
    MARKERS.iter().any(|marker| lowered.contains(marker))
}
/// Pick the first discovered HDHomeRun device and a likely-clear (non-DRM)
/// lineup entry, returning `(host, channel)` for capture. Returns `None`
/// when discovery fails or no suitable entry exists.
fn autodiscover_hdhr_host_and_channel() -> Option<(String, String)> {
    let devices = ec_hdhomerun::discover().ok()?;
    let device = devices.into_iter().next()?;
    let lineup = ec_hdhomerun::fetch_lineup(&device).ok()?;
    let entry = lineup.iter().find(|e| {
        // Skip entries whose tags or raw JSON hint at DRM, and require a
        // non-empty guide number so capture URLs are well-formed.
        let tag_drm = e.tags.iter().any(|t| looks_drm(t));
        let raw_drm = e
            .raw
            .as_object()
            .map(|obj| {
                obj.iter()
                    .any(|(k, v)| looks_drm(k) || looks_drm(&v.to_string()))
            })
            .unwrap_or(false);
        !tag_drm && !raw_drm && e.channel.number.as_deref().unwrap_or("").trim() != ""
    })?;
    let host = device.ip.clone();
    // Fall back to the channel name when the number is absent. (The previous
    // `.or_else(|| Some(name)).unwrap_or_else(|| "2.1")` chain made the
    // hard-coded "2.1" fallback unreachable dead code; this is the same
    // behavior without the dead branch.)
    let channel = entry
        .channel
        .number
        .clone()
        .unwrap_or_else(|| entry.channel.name.clone());
    Some((host, channel))
}
/// Locate the `ec-node` binary: an explicit `EC_NODE_BIN` override wins,
/// then cargo-provided bin paths, then a standard cargo target layout
/// derived from the current test executable's location.
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    // Fallback: assume a standard cargo target layout.
    let exe = std::env::current_exe().expect("current_exe");
    exe.parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps")
        .join("ec-node")
}
// E2E: publish a live HDHomeRun capture over MoQ with signed manifests and
// an encrypted network, then subscribe and verify a playable HLS output.
// Host/channel come from env vars or, failing that, HDHR autodiscovery;
// silently skips when neither source is available.
#[test]
#[ignore]
fn e2e_hdhr_publish_then_subscribe_with_manifest_and_encryption() {
    let host = env_required("EVERY_CHANNEL_E2E_HDHR_HOST");
    let channel = env_required("EVERY_CHANNEL_E2E_HDHR_CHANNEL");
    let (host, channel) = match (host, channel) {
        (Some(host), Some(channel)) => (host, channel),
        _ => match autodiscover_hdhr_host_and_channel() {
            Some(v) => v,
            None => return, // skip
        },
    };
    let ec_node = ec_node_path();
    // Keep secrets deterministic for reproducibility.
    let signing_key = "11".repeat(32);
    let network_secret = "22".repeat(32);
    // Millisecond timestamp namespaces the broadcast and temp dir so repeated
    // runs do not collide.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let broadcast_name = format!("every.channel/e2e/{ts}");
    let tmp = std::env::temp_dir().join(format!("ec-e2e-hdhr-{ts}"));
    let publish_chunks = tmp.join("publish-chunks");
    let subscribe_out = tmp.join("subscribe-out");
    // Publisher: ingest directly from the HDHR tuner, 2s chunks, at most 8,
    // with manifests signed by the deterministic key above.
    let mut publisher = Command::new(&ec_node);
    publisher
        .env("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", &signing_key)
        .arg("moq-publish")
        .arg("--publish-manifests")
        .arg("--epoch-chunks")
        .arg("1")
        .arg("--max-chunks")
        .arg("8")
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--broadcast-name")
        .arg(&broadcast_name)
        .arg("--network-secret")
        .arg(&network_secret)
        .arg("--chunk-dir")
        .arg(&publish_chunks)
        .arg("hdhr")
        .arg("--host")
        .arg(&host)
        .arg("--channel")
        .arg(&channel)
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit());
    let mut child = publisher.spawn().expect("failed to spawn publisher");
    let stdout = child.stdout.take().expect("publisher stdout missing");
    let mut lines = BufReader::new(stdout).lines();
    // Scrape stdout for the endpoint address and the chunk track name; the
    // subscriber needs both. Give up after 10 seconds.
    let mut remote: Option<String> = None;
    let mut track: Option<String> = None;
    let deadline = Instant::now() + Duration::from_secs(10);
    while Instant::now() < deadline {
        let line = match lines.next() {
            Some(Ok(line)) => line,
            Some(Err(_)) => continue,
            None => break,
        };
        if let Some(rest) = line.strip_prefix("moq endpoint addr: ") {
            remote = Some(rest.trim().to_string());
        } else if let Some(rest) = line.strip_prefix("moq track: ") {
            track = Some(rest.trim().to_string());
        }
        if remote.is_some() && track.is_some() {
            break;
        }
    }
    let remote = remote.expect("publisher did not print endpoint addr in time");
    let track = track.expect("publisher did not print track in time");
    // Subscriber: require valid signed manifests (zero tolerance for invalid
    // chunks) and stop after three chunks.
    let mut subscriber = Command::new(&ec_node);
    subscriber
        .arg("moq-subscribe")
        .arg("--remote")
        .arg(&remote)
        .arg("--broadcast-name")
        .arg(&broadcast_name)
        .arg("--track-name")
        .arg(&track)
        .arg("--subscribe-manifests")
        .arg("--require-manifest")
        .arg("--max-invalid-chunks")
        .arg("0")
        .arg("--stop-after")
        .arg("3")
        .arg("--output-dir")
        .arg(&subscribe_out)
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--network-secret")
        .arg(&network_secret)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit());
    let mut sub_child = subscriber.spawn().expect("failed to spawn subscriber");
    // Poll the subscriber to completion with a 30s watchdog.
    let start = Instant::now();
    loop {
        if let Ok(Some(status)) = sub_child.try_wait() {
            assert!(status.success(), "subscriber exited with {status}");
            break;
        }
        if start.elapsed() > Duration::from_secs(30) {
            let _ = sub_child.kill();
            panic!("subscriber timed out");
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    // Publisher should exit after max chunks; don't hang forever.
    let start = Instant::now();
    loop {
        if let Ok(Some(status)) = child.try_wait() {
            assert!(status.success(), "publisher exited with {status}");
            break;
        }
        if start.elapsed() > Duration::from_secs(30) {
            let _ = child.kill();
            panic!("publisher timed out");
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    // The subscriber writes an HLS playlist plus segment files; require the
    // playlist and at least one segment.
    let playlist = subscribe_out.join("index.m3u8");
    assert!(
        playlist.exists(),
        "missing playlist at {}",
        playlist.display()
    );
    let segments = std::fs::read_dir(&subscribe_out)
        .unwrap()
        .filter_map(|e| e.ok())
        .filter(|e| e.file_name().to_string_lossy().starts_with("segment_"))
        .count();
    assert!(segments >= 1, "expected at least one segment");
}

View file

@ -0,0 +1,305 @@
use std::io::{BufRead, BufReader, Read, Write};
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
const TS_PACKET_SIZE: usize = 188;
/// Read an environment variable, returning `None` when it is unset or
/// contains only whitespace; otherwise the trimmed value.
fn env_required(key: &str) -> Option<String> {
    match std::env::var(key) {
        Ok(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        }
        Err(_) => None,
    }
}
/// Locate the `ec-node` binary: an explicit `EC_NODE_BIN` override wins,
/// then cargo-provided bin paths, then the target/debug directory derived
/// from the current test executable's location.
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    let exe = std::env::current_exe().expect("current_exe");
    exe.parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps")
        .join("ec-node")
}
/// Drain `lines` until one starts with `prefix`, returning the trimmed
/// remainder after the prefix. Gives up when `timeout` elapses or the
/// iterator is exhausted; individual read errors are skipped.
fn wait_for_line_prefix(
    lines: &mut dyn Iterator<Item = std::io::Result<String>>,
    prefix: &str,
    timeout: Duration,
) -> Option<String> {
    let give_up_at = Instant::now() + timeout;
    loop {
        if Instant::now() >= give_up_at {
            return None;
        }
        // `?` bails with None when the stream of lines ends.
        let item = lines.next()?;
        if let Ok(line) = item {
            if let Some(rest) = line.strip_prefix(prefix) {
                return Some(rest.trim().to_string());
            }
        }
    }
}
/// Record a short MPEG-TS sample from an HDHomeRun into `out_path`.
///
/// Resolves `channel` against the device lineup (number first, then name)
/// and opens the lineup's own stream URL so the correct host/port (often
/// :5004) is used. The capture is capped by bytes (~8 MiB) and wall-clock
/// time (~6 s) in case the `duration=` hint is ignored by the device,
/// trimmed to a whole number of 188-byte TS packets, and must contain at
/// least 20 packets.
fn write_short_ts_recording(
    host: &str,
    channel: &str,
    out_path: &std::path::Path,
) -> anyhow::Result<()> {
    // Use the lineup's stream URL so we get the correct host/port (often :5004).
    // HDHomeRun supports `duration=...` on the stream URL on many models.
    // We also cap by time/bytes to avoid hanging if duration is ignored.
    let device = ec_hdhomerun::discover_from_host(host)?;
    let lineup = ec_hdhomerun::fetch_lineup(&device)?;
    let entry = ec_hdhomerun::find_lineup_entry_by_number(&lineup, channel)
        .or_else(|| ec_hdhomerun::find_lineup_entry_by_name(&lineup, channel))
        .ok_or_else(|| anyhow::anyhow!("channel not found in lineup: {channel}"))?;
    // Tuner allocation can transiently fail (503) if another client is using
    // all tuners. Retry briefly — but with a bounded number of attempts so a
    // persistently busy device fails the test instead of spinning forever
    // (the previous loop retried 503s without limit).
    let mut opened = None;
    let mut last_err: Option<anyhow::Error> = None;
    for _attempt in 0..20 {
        match ec_hdhomerun::open_stream_entry(entry, Some(8)) {
            Ok(stream) => {
                opened = Some(stream);
                break;
            }
            Err(err) => {
                let msg = format!("{err:#}");
                last_err = Some(err);
                if msg.contains("503") {
                    std::thread::sleep(Duration::from_millis(500));
                    continue;
                }
                // Non-transient error: give up immediately.
                return Err(last_err.unwrap());
            }
        }
    }
    let mut stream = match opened {
        Some(stream) => stream,
        None => {
            return Err(
                last_err.unwrap_or_else(|| anyhow::anyhow!("failed to open HDHR stream"))
            )
        }
    };
    let mut file = std::fs::File::create(out_path)?;
    let start = Instant::now();
    let mut bytes = 0usize;
    let mut buf = [0u8; 64 * 1024];
    loop {
        let n = stream.read(&mut buf)?;
        if n == 0 {
            break;
        }
        file.write_all(&buf[..n])?;
        bytes += n;
        // Hard caps: ~8 MiB or ~6 s of capture, whichever comes first.
        if bytes >= 8 * 1024 * 1024 {
            break;
        }
        if start.elapsed() > Duration::from_secs(6) {
            break;
        }
    }
    file.flush()?;
    // Ensure the TS file ends on a packet boundary.
    let len = file.metadata()?.len();
    let rem = (len as usize) % TS_PACKET_SIZE;
    if rem != 0 {
        file.set_len(len - rem as u64)?;
        bytes = (len as usize) - rem;
    }
    // Use the shared constant instead of the magic number 188.
    if bytes < TS_PACKET_SIZE * 20 {
        anyhow::bail!("recorded TS too small ({} bytes) from HDHR {}", bytes, host);
    }
    Ok(())
}
#[test]
#[ignore]
fn e2e_split_sources_manifests_from_one_peer_objects_from_another() {
let host = match env_required("EVERY_CHANNEL_E2E_HDHR_HOST") {
Some(v) => v,
None => return, // skip
};
let channel = match env_required("EVERY_CHANNEL_E2E_HDHR_CHANNEL") {
Some(v) => v,
None => return, // skip
};
let ec_node = ec_node_path();
// Keep secrets deterministic for reproducibility.
let signing_key = "11".repeat(32);
let network_secret = "22".repeat(32);
let ts = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_millis();
let stream_id = format!("every.channel/e2e/mesh/{ts}");
let broadcast_name = stream_id.clone();
let tmp = std::env::temp_dir().join(format!("ec-e2e-mesh-split-{ts}"));
let _ = std::fs::create_dir_all(&tmp);
let input_ts = tmp.join("input.ts");
let manifest_chunks = tmp.join("chunks-manifests");
let object_chunks = tmp.join("chunks-objects");
let subscribe_out = tmp.join("subscribe-out");
write_short_ts_recording(&host, &channel, &input_ts).expect("failed to record TS from HDHR");
// Publisher A: leader/signer, publishes manifests only.
// Give subscribers time to connect before ingest starts.
let mut pub_manifests = Command::new(&ec_node);
pub_manifests
.env("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", &signing_key)
.arg("moq-publish")
.arg("--publish-manifests")
.arg("--publish-chunks")
.arg("false")
.arg("--epoch-chunks")
.arg("1")
.arg("--max-chunks")
.arg("6")
.arg("--chunk-ms")
.arg("2000")
.arg("--stream-id")
.arg(&stream_id)
.arg("--broadcast-name")
.arg(&broadcast_name)
.arg("--track-name")
.arg("noop")
.arg("--manifest-track")
.arg("manifests")
.arg("--network-secret")
.arg(&network_secret)
.arg("--chunk-dir")
.arg(&manifest_chunks)
.arg("--startup-delay-ms")
.arg("5000")
.arg("ts")
.arg(input_ts.to_string_lossy().to_string())
.stdout(Stdio::piped())
.stderr(Stdio::inherit());
let mut pub_a = pub_manifests.spawn().expect("spawn manifest publisher");
let a_stdout = pub_a
.stdout
.take()
.expect("manifest publisher stdout missing");
let mut a_lines = BufReader::new(a_stdout).lines();
let remote_manifests =
wait_for_line_prefix(&mut a_lines, "moq endpoint addr: ", Duration::from_secs(10))
.expect("manifest publisher did not print endpoint addr");
// Publisher B: relay/data, publishes chunk objects only.
// Delay longer than the manifest publisher so the subscriber can receive manifests first.
let mut pub_objects = Command::new(&ec_node);
pub_objects
.arg("moq-publish")
.arg("--publish-chunks")
.arg("true")
.arg("--max-chunks")
.arg("6")
.arg("--chunk-ms")
.arg("2000")
.arg("--stream-id")
.arg(&stream_id)
.arg("--broadcast-name")
.arg(&broadcast_name)
.arg("--track-name")
.arg("objects")
.arg("--network-secret")
.arg(&network_secret)
.arg("--chunk-dir")
.arg(&object_chunks)
.arg("--startup-delay-ms")
.arg("9000")
.arg("ts")
.arg(input_ts.to_string_lossy().to_string())
.stdout(Stdio::piped())
.stderr(Stdio::inherit());
let mut pub_b = pub_objects.spawn().expect("spawn object publisher");
let b_stdout = pub_b
.stdout
.take()
.expect("object publisher stdout missing");
let mut b_lines = BufReader::new(b_stdout).lines();
let remote_objects =
wait_for_line_prefix(&mut b_lines, "moq endpoint addr: ", Duration::from_secs(10))
.expect("object publisher did not print endpoint addr");
// Subscriber: stitch objects from B with manifests from A.
let mut subscriber = Command::new(&ec_node);
subscriber
.arg("moq-subscribe")
.arg("--remote")
.arg(&remote_objects)
.arg("--remote-manifests")
.arg(&remote_manifests)
.arg("--broadcast-name")
.arg(&broadcast_name)
.arg("--track-name")
.arg("objects")
.arg("--manifest-track")
.arg("manifests")
.arg("--subscribe-manifests")
.arg("--require-manifest")
.arg("--max-invalid-chunks")
.arg("0")
.arg("--stop-after")
.arg("2")
.arg("--output-dir")
.arg(&subscribe_out)
.arg("--chunk-ms")
.arg("2000")
.arg("--stream-id")
.arg(&stream_id)
.arg("--network-secret")
.arg(&network_secret)
.stdout(Stdio::inherit())
.stderr(Stdio::inherit());
let mut sub_child = subscriber.spawn().expect("failed to spawn subscriber");
let start = Instant::now();
loop {
if let Ok(Some(status)) = sub_child.try_wait() {
assert!(status.success(), "subscriber exited with {status}");
break;
}
if start.elapsed() > Duration::from_secs(30) {
let _ = sub_child.kill();
panic!("subscriber timed out");
}
std::thread::sleep(Duration::from_millis(200));
}
// Ensure publishers exit after max chunks.
for child in [&mut pub_a, &mut pub_b] {
let start = Instant::now();
loop {
if let Ok(Some(status)) = child.try_wait() {
assert!(status.success(), "publisher exited with {status}");
break;
}
if start.elapsed() > Duration::from_secs(30) {
let _ = child.kill();
panic!("publisher timed out");
}
std::thread::sleep(Duration::from_millis(200));
}
}
let playlist = subscribe_out.join("index.m3u8");
assert!(
playlist.exists(),
"missing playlist at {}",
playlist.display()
);
let segments = std::fs::read_dir(&subscribe_out)
.unwrap()
.filter_map(|e| e.ok())
.filter(|e| e.file_name().to_string_lossy().starts_with("segment_"))
.count();
assert!(segments >= 1, "expected at least one segment");
}

View file

@ -0,0 +1,345 @@
use std::io::{BufRead, BufReader, Read};
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
const TS_PACKET_SIZE: usize = 188;
/// Read an environment variable, returning `None` when it is unset or
/// contains only whitespace; otherwise the trimmed value.
fn env_required(key: &str) -> Option<String> {
    match std::env::var(key) {
        Ok(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        }
        Err(_) => None,
    }
}
/// Heuristic: does this lineup tag/field text suggest a DRM-protected
/// channel? Case-insensitive substring match on a few marker words.
fn looks_drm(value: &str) -> bool {
    const MARKERS: [&str; 5] = ["drm", "encrypted", "protected", "copy", "widevine"];
    let lowered = value.to_lowercase();
    MARKERS.iter().any(|marker| lowered.contains(marker))
}
/// Pick the first discovered HDHomeRun device and a likely-clear (non-DRM)
/// lineup entry, returning `(host, channel)` for capture. Returns `None`
/// when discovery fails or no suitable entry exists.
fn autodiscover_hdhr_host_and_channel() -> Option<(String, String)> {
    let devices = ec_hdhomerun::discover().ok()?;
    let device = devices.into_iter().next()?;
    let lineup = ec_hdhomerun::fetch_lineup(&device).ok()?;
    let entry = lineup.iter().find(|e| {
        // Prefer a likely-clear channel to avoid false negatives in E2E:
        // reject entries whose tags or raw JSON hint at DRM, and require a
        // non-empty guide number.
        let tag_drm = e.tags.iter().any(|t| looks_drm(t));
        let raw_drm = e
            .raw
            .as_object()
            .map(|obj| {
                obj.iter()
                    .any(|(k, v)| looks_drm(k) || looks_drm(&v.to_string()))
            })
            .unwrap_or(false);
        !tag_drm && !raw_drm && e.channel.number.as_deref().unwrap_or("").trim() != ""
    })?;
    let host = device.ip.clone();
    // Fall back to the channel name when the number is absent. (The previous
    // `.or_else(|| Some(name)).unwrap_or_else(|| "2.1")` chain made the
    // hard-coded "2.1" fallback unreachable dead code; this is the same
    // behavior without the dead branch.)
    let channel = entry
        .channel
        .number
        .clone()
        .unwrap_or_else(|| entry.channel.name.clone());
    Some((host, channel))
}
/// Locate the `ec-node` binary: an explicit `EC_NODE_BIN` override wins,
/// then cargo-provided bin paths, then the target/debug directory derived
/// from the current test executable's location.
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    let exe = std::env::current_exe().expect("current_exe");
    exe.parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps")
        .join("ec-node")
}
/// Drain `lines` until one starts with `prefix`, returning the trimmed
/// remainder after the prefix. Gives up when `timeout` elapses or the
/// iterator is exhausted; individual read errors are skipped.
fn wait_for_line_prefix(
    lines: &mut dyn Iterator<Item = std::io::Result<String>>,
    prefix: &str,
    timeout: Duration,
) -> Option<String> {
    let give_up_at = Instant::now() + timeout;
    loop {
        if Instant::now() >= give_up_at {
            return None;
        }
        // `?` bails with None when the stream of lines ends.
        let item = lines.next()?;
        if let Ok(line) = item {
            if let Some(rest) = line.strip_prefix(prefix) {
                return Some(rest.trim().to_string());
            }
        }
    }
}
/// Capture a short MPEG-TS sample from an HDHomeRun at `host` for `channel`
/// and write it to `out_path`.
///
/// The channel is resolved through the device lineup (guide number first,
/// then name), but the capture always goes through the provided host so
/// mDNS hostnames embedded in lineup URLs are never required. Tuner
/// allocation can transiently fail with 5xx responses, so opening the
/// stream is retried with a growing backoff. The recording is trimmed to a
/// whole number of 188-byte TS packets and must hold at least 200 packets.
fn write_short_ts_recording(
    host: &str,
    channel: &str,
    out_path: &std::path::Path,
) -> anyhow::Result<()> {
    // Use lineup to resolve name -> number, but capture from the provided host.
    // (OrbStack/Linux may not resolve the lineup URL's mDNS hostname.)
    let device = ec_hdhomerun::discover_from_host(host)?;
    let lineup = ec_hdhomerun::fetch_lineup(&device)?;
    let entry = ec_hdhomerun::find_lineup_entry_by_number(&lineup, channel)
        .or_else(|| ec_hdhomerun::find_lineup_entry_by_name(&lineup, channel))
        .ok_or_else(|| anyhow::anyhow!("channel not found in lineup: {channel}"))?;
    let guide_number = entry.channel.number.as_deref().unwrap_or(channel);
    let capture_url = format!("http://{host}:5004/auto/v{guide_number}");
    // Capture a short TS sample directly from the HDHR.
    // Retry a few times to handle "no tuner available" 5xx responses.
    let mut last_err: Option<anyhow::Error> = None;
    for attempt in 0..10 {
        match ec_hdhomerun::open_stream_url(&capture_url, Some(12)) {
            Ok(mut stream) => {
                let mut file = std::fs::File::create(out_path)?;
                std::io::copy(&mut stream, &mut file)?;
                last_err = None;
                break;
            }
            Err(err) => {
                last_err = Some(err);
                // Linear backoff: 400 ms, 800 ms, ...
                std::thread::sleep(Duration::from_millis(400 * (attempt + 1) as u64));
                continue;
            }
        }
    }
    if let Some(err) = last_err {
        return Err(err);
    }
    // Ensure the TS file ends on a packet boundary by truncating in place
    // instead of reading the whole capture back into memory and rewriting it.
    let mut len = std::fs::metadata(out_path)?.len() as usize;
    let rem = len % TS_PACKET_SIZE;
    if rem != 0 {
        len -= rem;
        let file = std::fs::OpenOptions::new().write(true).open(out_path)?;
        file.set_len(len as u64)?;
    }
    // Use the shared constant instead of the magic number 188.
    if len < TS_PACKET_SIZE * 200 {
        anyhow::bail!("recorded TS too small ({} bytes) from HDHR {}", len, host);
    }
    Ok(())
}
// E2E: CMAF variant of the split-source mesh scenario. Publisher A signs and
// serves only manifests for CMAF segments; publisher B serves init + media
// segments as objects. The subscriber pulls init/segments from B, verifies
// them against manifests from A, and must produce a playable HLS output
// with an init.mp4 and at least one .m4s segment.
// Host/channel come from env vars or HDHR autodiscovery; skips otherwise.
#[test]
#[ignore]
fn e2e_split_sources_cmaf_init_from_objects_peer_segments_verified_by_manifests_peer() {
    let host = env_required("EVERY_CHANNEL_E2E_HDHR_HOST");
    let channel = env_required("EVERY_CHANNEL_E2E_HDHR_CHANNEL");
    let (host, channel) = match (host, channel) {
        (Some(host), Some(channel)) => (host, channel),
        _ => match autodiscover_hdhr_host_and_channel() {
            Some(v) => v,
            None => return, // skip
        },
    };
    let ec_node = ec_node_path();
    // Keep secrets deterministic for reproducibility.
    let signing_key = "11".repeat(32);
    let network_secret = "22".repeat(32);
    // Millisecond timestamp namespaces the stream id and temp dir so repeated
    // runs do not collide.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let stream_id = format!("every.channel/e2e/mesh-cmaf/{ts}");
    let broadcast_name = stream_id.clone();
    let tmp = std::env::temp_dir().join(format!("ec-e2e-mesh-split-cmaf-{ts}"));
    let _ = std::fs::create_dir_all(&tmp);
    let input_ts = tmp.join("input.ts");
    let manifest_chunks = tmp.join("chunks-manifests");
    let object_chunks = tmp.join("chunks-objects");
    let subscribe_out = tmp.join("subscribe-out");
    // Both publishers ingest the same recorded file so their chunking agrees.
    write_short_ts_recording(&host, &channel, &input_ts).expect("failed to record TS from HDHR");
    // Publisher A: leader/signer, publishes manifests only (for CMAF segments).
    let mut pub_manifests = Command::new(&ec_node);
    pub_manifests
        .env("EVERY_CHANNEL_MANIFEST_SIGNING_KEY", &signing_key)
        .arg("moq-publish")
        .arg("--publish-manifests")
        .arg("--publish-chunks")
        .arg("false")
        .arg("--encode")
        .arg("cmaf")
        .arg("--epoch-chunks")
        .arg("1")
        .arg("--max-chunks")
        .arg("4")
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--stream-id")
        .arg(&stream_id)
        .arg("--broadcast-name")
        .arg(&broadcast_name)
        .arg("--track-name")
        .arg("noop")
        .arg("--manifest-track")
        .arg("manifests")
        .arg("--network-secret")
        .arg(&network_secret)
        .arg("--chunk-dir")
        .arg(&manifest_chunks)
        .arg("--startup-delay-ms")
        .arg("6000")
        .arg("ts")
        .arg(input_ts.to_string_lossy().to_string())
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit());
    let mut pub_a = pub_manifests.spawn().expect("spawn manifest publisher");
    let a_stdout = pub_a
        .stdout
        .take()
        .expect("manifest publisher stdout missing");
    let mut a_lines = BufReader::new(a_stdout).lines();
    // Each publisher prints its own endpoint; the subscriber dials both.
    let remote_manifests =
        wait_for_line_prefix(&mut a_lines, "moq endpoint addr: ", Duration::from_secs(10))
            .expect("manifest publisher did not print endpoint addr");
    // Publisher B: publishes init + segments as objects only. Its longer
    // startup delay lets the subscriber receive manifests from A first.
    let mut pub_objects = Command::new(&ec_node);
    pub_objects
        .arg("moq-publish")
        .arg("--publish-chunks")
        .arg("true")
        .arg("--encode")
        .arg("cmaf")
        .arg("--init-track")
        .arg("init")
        .arg("--max-chunks")
        .arg("4")
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--stream-id")
        .arg(&stream_id)
        .arg("--broadcast-name")
        .arg(&broadcast_name)
        .arg("--track-name")
        .arg("objects")
        .arg("--network-secret")
        .arg(&network_secret)
        .arg("--chunk-dir")
        .arg(&object_chunks)
        .arg("--startup-delay-ms")
        .arg("10000")
        .arg("ts")
        .arg(input_ts.to_string_lossy().to_string())
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit());
    let mut pub_b = pub_objects.spawn().expect("spawn object publisher");
    let b_stdout = pub_b
        .stdout
        .take()
        .expect("object publisher stdout missing");
    let mut b_lines = BufReader::new(b_stdout).lines();
    let remote_objects =
        wait_for_line_prefix(&mut b_lines, "moq endpoint addr: ", Duration::from_secs(10))
            .expect("object publisher did not print endpoint addr");
    // Subscriber: init+segments from B, manifests from A.
    let mut subscriber = Command::new(&ec_node);
    subscriber
        .arg("moq-subscribe")
        .arg("--remote")
        .arg(&remote_objects)
        .arg("--remote-manifests")
        .arg(&remote_manifests)
        .arg("--broadcast-name")
        .arg(&broadcast_name)
        .arg("--track-name")
        .arg("objects")
        .arg("--manifest-track")
        .arg("manifests")
        .arg("--subscribe-manifests")
        .arg("--require-manifest")
        .arg("--max-invalid-chunks")
        .arg("0")
        .arg("--container")
        .arg("cmaf")
        .arg("--subscribe-init")
        .arg("--init-track")
        .arg("init")
        .arg("--stop-after")
        .arg("2")
        .arg("--output-dir")
        .arg(&subscribe_out)
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--stream-id")
        .arg(&stream_id)
        .arg("--network-secret")
        .arg(&network_secret)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit());
    let mut sub_child = subscriber.spawn().expect("failed to spawn subscriber");
    // Poll the subscriber to completion with a 60s watchdog (CMAF encoding
    // plus the staggered publisher delays take longer than the TS case).
    let start = Instant::now();
    loop {
        if let Ok(Some(status)) = sub_child.try_wait() {
            assert!(status.success(), "subscriber exited with {status}");
            break;
        }
        if start.elapsed() > Duration::from_secs(60) {
            let _ = sub_child.kill();
            panic!("subscriber timed out");
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    // Ensure publishers exit after max chunks.
    for child in [&mut pub_a, &mut pub_b] {
        let start = Instant::now();
        loop {
            if let Ok(Some(status)) = child.try_wait() {
                assert!(status.success(), "publisher exited with {status}");
                break;
            }
            if start.elapsed() > Duration::from_secs(90) {
                let _ = child.kill();
                panic!("publisher timed out");
            }
            std::thread::sleep(Duration::from_millis(200));
        }
    }
    // Require the playlist, the CMAF init segment, and at least one media
    // segment in the subscriber's output directory.
    let playlist = subscribe_out.join("index.m3u8");
    assert!(
        playlist.exists(),
        "missing playlist at {}",
        playlist.display()
    );
    let init = subscribe_out.join("init.mp4");
    assert!(init.exists(), "missing init segment at {}", init.display());
    let segments = std::fs::read_dir(&subscribe_out)
        .unwrap()
        .filter_map(|e| e.ok())
        .filter(|e| e.file_name().to_string_lossy().ends_with(".m4s"))
        .count();
    assert!(segments >= 1, "expected at least one .m4s segment");
}

View file

@ -0,0 +1,314 @@
use std::ffi::OsStr;
use std::io::{BufRead, BufReader, Write};
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
/// Resolve `cmd` on PATH, returning `None` when it is not found.
/// Thin wrapper flattening the `which` crate's `Result` into an `Option`
/// (the manual `if let Ok(..)`/`None` dance was just `Result::ok`).
fn which(cmd: &str) -> Option<std::path::PathBuf> {
    which::which(cmd).ok()
}
fn chrome_path() -> Option<std::path::PathBuf> {
// Prefer the standard macOS Chrome app bundle.
let mac =
std::path::PathBuf::from("/Applications/Google Chrome.app/Contents/MacOS/Google Chrome");
if mac.exists() {
return Some(mac);
}
which("google-chrome")
.or_else(|| which("google-chrome-stable"))
.or_else(|| which("chromium"))
}
/// Locate the `ec-node` binary: an explicit `EC_NODE_BIN` override wins,
/// then cargo-provided bin paths, then the target/debug directory derived
/// from the current test executable's location.
fn ec_node_path() -> std::path::PathBuf {
    for key in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(key) {
            return value.into();
        }
    }
    let exe = std::env::current_exe().expect("current_exe");
    exe.parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps")
        .join("ec-node")
}
/// Pull the next line from `lines` that is non-empty after trimming,
/// skipping read errors, until `timeout` elapses or the iterator ends.
fn read_line_with_timeout(
    lines: &mut dyn Iterator<Item = std::io::Result<String>>,
    timeout: Duration,
) -> Option<String> {
    let give_up_at = Instant::now() + timeout;
    while Instant::now() < give_up_at {
        match lines.next() {
            Some(Ok(raw)) => {
                let trimmed = raw.trim();
                if !trimmed.is_empty() {
                    return Some(trimmed.to_string());
                }
            }
            // Transient read error: keep polling.
            Some(Err(_)) => {}
            // Source exhausted: no more lines will ever arrive.
            None => return None,
        }
    }
    None
}
/// Generate a ~12-second deterministic-ish MPEG-TS fixture with ffmpeg:
/// single-threaded x264 video (fixed GOP of 60, no B-frames, scene-cut
/// keyframes disabled) plus a 1 kHz sine AAC stereo audio track at 48 kHz.
fn generate_ts_fixture(out: &std::path::Path) -> anyhow::Result<()> {
    let status = Command::new("ffmpeg")
        .args([
            "-hide_banner",
            "-loglevel",
            "error",
            "-nostdin",
            "-y",
            // Video source: synthetic test pattern.
            "-f",
            "lavfi",
            "-i",
            "testsrc2=size=1280x720:rate=30",
            // Audio source: 1 kHz sine.
            "-f",
            "lavfi",
            "-i",
            "sine=frequency=1000:sample_rate=48000",
            "-t",
            "12",
            "-map",
            "0:v:0",
            "-map",
            "1:a:0",
            // Single-threaded x264 with a fixed GOP for reproducibility.
            "-c:v",
            "libx264",
            "-pix_fmt",
            "yuv420p",
            "-g",
            "60",
            "-keyint_min",
            "60",
            "-sc_threshold",
            "0",
            "-bf",
            "0",
            "-threads",
            "1",
            "-c:a",
            "aac",
            "-b:a",
            "128k",
            "-ac",
            "2",
            "-ar",
            "48000",
            "-f",
            "mpegts",
        ])
        .arg(out)
        .status()?;
    if !status.success() {
        anyhow::bail!("ffmpeg fixture generation failed with {status}");
    }
    Ok(())
}
/// Find a `<button>` whose exact trimmed `innerText` equals `text` and click
/// it from inside the page (injected JS, not a CDP-level click).
fn click_button_by_text(tab: &headless_chrome::Tab, text: &str) -> anyhow::Result<()> {
    // JSON-encode `text` so it is embedded as a valid, escaped JS string literal.
    let js = format!(
        r#"(function() {{
    let btns = Array.from(document.querySelectorAll('button'));
    let btn = btns.find(b => (b.innerText || '').trim() === {t});
    if (!btn) return false;
    btn.click();
    return true;
}})();"#,
        t = serde_json::to_string(text).unwrap()
    );
    let v = tab.evaluate(&js, false)?;
    let ok = v.value.and_then(|v| v.as_bool()).unwrap_or(false);
    if !ok {
        anyhow::bail!("button not found: {text}");
    }
    Ok(())
}
/// Set the value of the `<input>` whose placeholder equals `placeholder`,
/// firing `input`/`change` events so framework bindings observe the change.
fn fill_input_by_placeholder(
    tab: &headless_chrome::Tab,
    placeholder: &str,
    value: &str,
) -> anyhow::Result<()> {
    // Both interpolations are JSON-encoded: {v} becomes a JS string literal,
    // and {p} doubles as the double-quoted attribute value in the CSS selector.
    let js = format!(
        r#"(function() {{
    let input = document.querySelector('input[placeholder={p}]');
    if (!input) return false;
    input.focus();
    input.value = {v};
    input.dispatchEvent(new Event('input', {{ bubbles: true }}));
    input.dispatchEvent(new Event('change', {{ bubbles: true }}));
    return true;
}})();"#,
        p = serde_json::to_string(placeholder).unwrap(),
        v = serde_json::to_string(value).unwrap()
    );
    let v = tab.evaluate(&js, false)?;
    let ok = v.value.and_then(|v| v.as_bool()).unwrap_or(false);
    if !ok {
        anyhow::bail!("input not found for placeholder: {placeholder}");
    }
    Ok(())
}
/// Read the reply code rendered by the site, if present.
///
/// Returns the value of the LAST readonly input inside the add menu; `None`
/// when the menu or the input has not been rendered yet.
fn get_reply_link(tab: &headless_chrome::Tab) -> anyhow::Result<Option<String>> {
    // Read the last readonly input inside the add menu; this is where we render the reply code.
    let js = r#"(function() {
    let menu = document.querySelector('.source-menu');
    if (!menu) return null;
    let inputs = Array.from(menu.querySelectorAll('input.source-menu-input[readonly]'));
    if (!inputs.length) return null;
    return inputs[inputs.length - 1].value || null;
})();"#;
    let v = tab.evaluate(js, false)?;
    Ok(v.value.and_then(|v| v.as_str().map(|s| s.to_string())))
}
/// Poll the page until `document.body.innerText` contains `needle`.
///
/// Re-evaluates every 200 ms; errors out once `timeout` has elapsed.
fn wait_for_text(
    tab: &headless_chrome::Tab,
    needle: &str,
    timeout: Duration,
) -> anyhow::Result<()> {
    // JSON-encode the needle once so it embeds safely in the JS source.
    let js = format!(
        r#"(function() {{
    return document.body && (document.body.innerText || '').includes({n});
}})();"#,
        n = serde_json::to_string(needle).unwrap()
    );
    let give_up_at = Instant::now() + timeout;
    loop {
        if Instant::now() >= give_up_at {
            anyhow::bail!("timed out waiting for text: {needle}");
        }
        let found = tab
            .evaluate(&js, false)?
            .value
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        if found {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(200));
    }
}
/// Poll until the page's `<video>` element is fed via MSE (a `blob:` src).
///
/// Re-evaluates every 200 ms; errors out once `timeout` has elapsed.
fn wait_for_blob_video(tab: &headless_chrome::Tab, timeout: Duration) -> anyhow::Result<()> {
    const JS: &str = r#"(function() {
    let v = document.querySelector('video');
    if (!v) return false;
    if (typeof v.src !== 'string') return false;
    return v.src.startsWith('blob:');
})();"#;
    let give_up_at = Instant::now() + timeout;
    while Instant::now() < give_up_at {
        let ready = tab
            .evaluate(JS, false)?
            .value
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        if ready {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    anyhow::bail!("timed out waiting for video blob src");
}
/// End-to-end: the deployed website's "Watch a link" flow tunes in to a
/// direct publisher running locally via `ec-node direct-publish`.
///
/// Flow: generate a TS fixture → spawn the publisher (offer link on stdout)
/// → paste the offer into the site → read the reply code back from the UI →
/// feed it to the publisher's stdin → assert the player goes Live with an
/// MSE (`blob:`) video source.
///
/// Skipped (returns Ok) unless both `ffmpeg` and a Chrome binary are
/// available; `#[ignore]`d so it only runs when requested explicitly.
#[test]
#[ignore]
fn e2e_remote_website_connects_to_local_direct_publisher() -> anyhow::Result<()> {
    if which("ffmpeg").is_none() {
        return Ok(()); // skip
    }
    let chrome = match chrome_path() {
        Some(p) => p,
        None => return Ok(()), // skip
    };
    // Site under test; overridable to point at a staging deploy.
    let site_url = std::env::var("EVERY_CHANNEL_SITE_URL")
        .unwrap_or_else(|_| "https://every.channel/".to_string());
    let ec_node = ec_node_path();
    // Millisecond timestamp keeps the temp dir unique across runs.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let tmp = std::env::temp_dir().join(format!("ec-e2e-remote-website-direct-{ts}"));
    let _ = std::fs::create_dir_all(&tmp);
    let input_ts = tmp.join("input.ts");
    let chunk_dir = tmp.join("chunks");
    generate_ts_fixture(&input_ts)?;
    // Spawn the publisher: its stdout carries the offer link, stdin takes the reply.
    let mut pub_child = Command::new(&ec_node)
        .arg("direct-publish")
        .arg("--chunk-dir")
        .arg(&chunk_dir)
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--max-segments")
        .arg("6")
        .arg("ts")
        .arg(&input_ts)
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit())
        .spawn()?;
    let stdout = pub_child.stdout.take().expect("publisher stdout missing");
    let mut lines = BufReader::new(stdout).lines();
    let offer = read_line_with_timeout(&mut lines, Duration::from_secs(60))
        .ok_or_else(|| anyhow::anyhow!("publisher did not print offer link in time"))?;
    if !offer.starts_with("every.channel://direct?c=") {
        anyhow::bail!("unexpected offer link: {offer}");
    }
    // Headless Chrome with autoplay allowed so <video> can start unattended.
    let launch_options = headless_chrome::LaunchOptionsBuilder::default()
        .path(Some(chrome))
        .headless(true)
        .args(vec![
            OsStr::new("--autoplay-policy=no-user-gesture-required"),
            OsStr::new("--mute-audio"),
        ])
        .build()
        .unwrap();
    let browser = headless_chrome::Browser::new(launch_options)?;
    let tab = browser.new_tab()?;
    tab.navigate_to(&site_url)?;
    tab.wait_until_navigated()?;
    // Open the add menu via class selector (stable).
    tab.wait_for_element("button.add-source")?.click()?;
    tab.wait_for_element(".source-menu")?;
    // Use Watch a link flow.
    fill_input_by_placeholder(&tab, "every.channel://watch?...", &offer)?;
    click_button_by_text(&tab, "Parse link")?;
    click_button_by_text(&tab, "Tune in")?;
    // Poll for reply link.
    let deadline = Instant::now() + Duration::from_secs(60);
    let reply = loop {
        if let Some(v) = get_reply_link(&tab)? {
            if v.starts_with("every.channel://direct?c=") {
                break v;
            }
        }
        if Instant::now() > deadline {
            anyhow::bail!("timed out waiting for reply link in UI");
        }
        std::thread::sleep(Duration::from_millis(200));
    };
    // Feed reply back to publisher.
    let stdin = pub_child.stdin.as_mut().expect("publisher stdin missing");
    writeln!(stdin, "{reply}")?;
    stdin.flush()?;
    // Website should go Live and show a blob video source.
    wait_for_text(&tab, "Live", Duration::from_secs(60))?;
    wait_for_blob_video(&tab, Duration::from_secs(60))?;
    // Cleanup (best-effort; the child may already have exited).
    let _ = pub_child.kill();
    let _ = pub_child.wait();
    let _ = std::fs::remove_dir_all(&tmp);
    Ok(())
}

View file

@ -0,0 +1,243 @@
use std::ffi::OsStr;
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
/// Locate `cmd` on the `PATH`, returning its absolute path if found.
fn which(cmd: &str) -> Option<std::path::PathBuf> {
    which::which(cmd).ok()
}
fn chrome_path() -> Option<std::path::PathBuf> {
// Prefer the standard macOS Chrome app bundle.
let mac =
std::path::PathBuf::from("/Applications/Google Chrome.app/Contents/MacOS/Google Chrome");
if mac.exists() {
return Some(mac);
}
which("google-chrome")
.or_else(|| which("google-chrome-stable"))
.or_else(|| which("chromium"))
}
/// Resolve the `ec-node` binary to spawn for this test.
///
/// Resolution order: `EC_NODE_BIN`, then the Cargo-provided
/// `CARGO_BIN_EXE_*` variables, then a sibling of the test executable
/// (i.e. `target/debug/ec-node`).
fn ec_node_path() -> std::path::PathBuf {
    for var in ["EC_NODE_BIN", "CARGO_BIN_EXE_ec_node", "CARGO_BIN_EXE_ec-node"] {
        if let Ok(value) = std::env::var(var) {
            return value.into();
        }
    }
    // Fall back to target/debug/ec-node next to the test binary
    // (test executables live in target/debug/deps/).
    let exe = std::env::current_exe().expect("current_exe");
    let debug_dir = exe
        .parent()
        .and_then(|p| p.parent())
        .expect("expected target/debug/deps");
    debug_dir.join("ec-node")
}
/// Write a ~12 s MPEG-TS test asset to `out` with the system `ffmpeg`:
/// synthetic `testsrc2` video plus a 1 kHz sine tone, encoded so that
/// repeated runs are as close to byte-stable as ffmpeg allows.
fn generate_ts_fixture(out: &std::path::Path) -> anyhow::Result<()> {
    // Deterministic-ish fixture: single-threaded x264, fixed GOP, sine audio.
    let status = Command::new("ffmpeg")
        .arg("-hide_banner")
        .arg("-loglevel")
        .arg("error")
        .arg("-nostdin")
        .arg("-y")
        // Input 0: synthetic video.
        .arg("-f")
        .arg("lavfi")
        .arg("-i")
        .arg("testsrc2=size=1280x720:rate=30")
        // Input 1: synthetic audio.
        .arg("-f")
        .arg("lavfi")
        .arg("-i")
        .arg("sine=frequency=1000:sample_rate=48000")
        .arg("-t")
        .arg("12")
        .arg("-map")
        .arg("0:v:0")
        .arg("-map")
        .arg("1:a:0")
        // Video: single thread, fixed GOP of 60, no scene-cut keys, no B-frames.
        .arg("-c:v")
        .arg("libx264")
        .arg("-pix_fmt")
        .arg("yuv420p")
        .arg("-g")
        .arg("60")
        .arg("-keyint_min")
        .arg("60")
        .arg("-sc_threshold")
        .arg("0")
        .arg("-bf")
        .arg("0")
        .arg("-threads")
        .arg("1")
        // Audio: stereo AAC at 48 kHz.
        .arg("-c:a")
        .arg("aac")
        .arg("-b:a")
        .arg("128k")
        .arg("-ac")
        .arg("2")
        .arg("-ar")
        .arg("48000")
        .arg("-f")
        .arg("mpegts")
        .arg(out)
        .status()?;
    if !status.success() {
        anyhow::bail!("ffmpeg fixture generation failed with {status}");
    }
    Ok(())
}
/// Wait for the first element matching `css` to appear, then click it.
fn click_css(tab: &headless_chrome::Tab, css: &str) -> anyhow::Result<()> {
    tab.wait_for_element(css)?.click()?;
    Ok(())
}
/// Poll the page (every 200 ms) until `document.body.innerText` contains
/// `needle`; errors once `timeout` has elapsed.
fn wait_for_text(
    tab: &headless_chrome::Tab,
    needle: &str,
    timeout: Duration,
) -> anyhow::Result<()> {
    let deadline = Instant::now() + timeout;
    while Instant::now() < deadline {
        // JSON-encode the needle so it embeds safely in the JS source.
        let js = format!(
            r#"(function() {{
    return document.body && (document.body.innerText || '').includes({n});
}})();"#,
            n = serde_json::to_string(needle).unwrap()
        );
        let v = tab.evaluate(&js, false)?;
        if v.value.and_then(|v| v.as_bool()).unwrap_or(false) {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    anyhow::bail!("timed out waiting for text: {needle}");
}
/// Poll (every 200 ms) until the page's `<video>` element has a `blob:` src,
/// i.e. playback is being fed through MSE; errors once `timeout` elapses.
fn wait_for_blob_video(tab: &headless_chrome::Tab, timeout: Duration) -> anyhow::Result<()> {
    let deadline = Instant::now() + timeout;
    while Instant::now() < deadline {
        let js = r#"(function() {
    let v = document.querySelector('video');
    if (!v) return false;
    if (typeof v.src !== 'string') return false;
    return v.src.startsWith('blob:');
})();"#;
        let v = tab.evaluate(js, false)?;
        if v.value.and_then(|v| v.as_bool()).unwrap_or(false) {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    anyhow::bail!("timed out waiting for video blob src");
}
/// Click the watch button for `stream_id` in the global list.
///
/// Returns `Ok(false)` when the button is not (yet) rendered. Probes both
/// `data-stream-id` and `data_stream_id` attribute spellings — presumably to
/// cope with differing attribute serialization; TODO confirm which one the
/// site actually emits and drop the other.
fn click_global_watch(tab: &headless_chrome::Tab, stream_id: &str) -> anyhow::Result<bool> {
    // JSON-encode the id; it is interpolated into a JS template literal.
    let js = format!(
        r#"(function() {{
    let target = {sid};
    let btn = document.querySelector(`button[data-stream-id="${{target}}"]`)
        || document.querySelector(`button[data_stream_id="${{target}}"]`);
    if (!btn) return false;
    btn.click();
    return true;
}})();"#,
        sid = serde_json::to_string(stream_id).unwrap()
    );
    let v = tab.evaluate(&js, false)?;
    Ok(v.value.and_then(|v| v.as_bool()).unwrap_or(false))
}
/// End-to-end: a local direct publisher registers with the public directory
/// and the deployed website finds it in the global list and tunes in.
///
/// Skipped (returns Ok) unless both `ffmpeg` and a Chrome binary are
/// available; `#[ignore]`d so it only runs when requested explicitly.
#[test]
#[ignore]
fn e2e_remote_website_directory_connects_to_local_direct_publisher() -> anyhow::Result<()> {
    if which("ffmpeg").is_none() {
        return Ok(()); // skip
    }
    let chrome = match chrome_path() {
        Some(p) => p,
        None => return Ok(()), // skip
    };
    // Site and directory under test; overridable for staging deploys.
    let site_url = std::env::var("EVERY_CHANNEL_SITE_URL")
        .unwrap_or_else(|_| "https://every.channel/".to_string());
    let directory_url = std::env::var("EVERY_CHANNEL_DIRECTORY_URL")
        .unwrap_or_else(|_| "https://every.channel".to_string());
    let ec_node = ec_node_path();
    // Millisecond timestamp keeps stream id, title and temp dir unique per run.
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis();
    let stream_id = format!("every.channel/e2e/{ts}");
    let title = format!("E2E {ts}");
    let tmp = std::env::temp_dir().join(format!("ec-e2e-remote-website-directory-{ts}"));
    let _ = std::fs::create_dir_all(&tmp);
    let input_ts = tmp.join("input.ts");
    let chunk_dir = tmp.join("chunks");
    generate_ts_fixture(&input_ts)?;
    // Publisher announces itself to the directory; no stdio handshake needed here.
    let mut pub_child = Command::new(&ec_node)
        .arg("direct-publish")
        .arg("--directory-url")
        .arg(&directory_url)
        .arg("--stream-id")
        .arg(&stream_id)
        .arg("--title")
        .arg(&title)
        .arg("--chunk-dir")
        .arg(&chunk_dir)
        .arg("--chunk-ms")
        .arg("2000")
        .arg("--max-segments")
        .arg("6")
        .arg("ts")
        .arg(&input_ts)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::inherit())
        .spawn()?;
    // Headless Chrome with autoplay allowed so <video> can start unattended.
    let launch_options = headless_chrome::LaunchOptionsBuilder::default()
        .path(Some(chrome))
        .headless(true)
        .args(vec![
            OsStr::new("--autoplay-policy=no-user-gesture-required"),
            OsStr::new("--mute-audio"),
        ])
        .build()
        .unwrap();
    let browser = headless_chrome::Browser::new(launch_options)?;
    let tab = browser.new_tab()?;
    tab.navigate_to(&site_url)?;
    tab.wait_until_navigated()?;
    // Refresh public list and watch our stream_id.
    click_css(&tab, "button[data-testid='global-refresh']")?;
    let deadline = Instant::now() + Duration::from_secs(60);
    loop {
        if click_global_watch(&tab, &stream_id)? {
            break;
        }
        if Instant::now() > deadline {
            anyhow::bail!("timed out waiting for stream_id to appear in global list");
        }
        std::thread::sleep(Duration::from_millis(250));
        // NOTE(review): this second attempt means each iteration may click up
        // to twice (here and at the top of the next iteration); if the button
        // toggles watch state this could un-watch — confirm it is idempotent.
        let _ = click_global_watch(&tab, &stream_id)?;
    }
    // Website should go Live and show a blob video source.
    wait_for_text(&tab, "Live", Duration::from_secs(60))?;
    wait_for_blob_video(&tab, Duration::from_secs(60))?;
    // Cleanup (best-effort).
    let _ = pub_child.kill();
    let _ = pub_child.wait();
    let _ = std::fs::remove_dir_all(&tmp);
    Ok(())
}

View file

@ -0,0 +1,174 @@
use std::ffi::OsStr;
use std::time::{Duration, Instant};
/// Locate `cmd` on the `PATH`, returning its absolute path if found.
fn which(cmd: &str) -> Option<std::path::PathBuf> {
    which::which(cmd).ok()
}
/// Best-effort discovery of a Chrome/Chromium binary: the canonical macOS
/// app bundle first, then common Linux binary names on the `PATH`.
fn chrome_path() -> Option<std::path::PathBuf> {
    let mac =
        std::path::PathBuf::from("/Applications/Google Chrome.app/Contents/MacOS/Google Chrome");
    if mac.exists() {
        return Some(mac);
    }
    which("google-chrome")
        .or_else(|| which("google-chrome-stable"))
        .or_else(|| which("chromium"))
}
/// Wait for the first element matching `css` to appear, then click it.
fn click_css(tab: &headless_chrome::Tab, css: &str) -> anyhow::Result<()> {
    tab.wait_for_element(css)?.click()?;
    Ok(())
}
/// Poll the page (every 200 ms) until `document.body.innerText` contains
/// `needle`; errors once `timeout` has elapsed.
fn wait_for_text(
    tab: &headless_chrome::Tab,
    needle: &str,
    timeout: Duration,
) -> anyhow::Result<()> {
    let deadline = Instant::now() + timeout;
    while Instant::now() < deadline {
        // JSON-encode the needle so it embeds safely in the JS source.
        let js = format!(
            r#"(function() {{
    return document.body && (document.body.innerText || '').includes({n});
}})();"#,
            n = serde_json::to_string(needle).unwrap()
        );
        let v = tab.evaluate(&js, false)?;
        if v.value.and_then(|v| v.as_bool()).unwrap_or(false) {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    anyhow::bail!("timed out waiting for text: {needle}");
}
/// Poll (every 200 ms) until the page's `<video>` element has a `blob:` src,
/// i.e. playback is being fed through MSE; errors once `timeout` elapses.
fn wait_for_blob_video(tab: &headless_chrome::Tab, timeout: Duration) -> anyhow::Result<()> {
    let deadline = Instant::now() + timeout;
    while Instant::now() < deadline {
        let js = r#"(function() {
    let v = document.querySelector('video');
    if (!v) return false;
    if (typeof v.src !== 'string') return false;
    return v.src.startsWith('blob:');
})();"#;
        let v = tab.evaluate(js, false)?;
        if v.value.and_then(|v| v.as_bool()).unwrap_or(false) {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    anyhow::bail!("timed out waiting for video blob src");
}
/// Poll (every 200 ms) until the page has created a `<video>` element;
/// errors once `timeout` has elapsed.
fn wait_for_video_element(tab: &headless_chrome::Tab, timeout: Duration) -> anyhow::Result<()> {
    const JS: &str = r#"(function() {
    return !!document.querySelector('video');
})();"#;
    let give_up_at = Instant::now() + timeout;
    while Instant::now() < give_up_at {
        let present = tab
            .evaluate(JS, false)?
            .value
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        if present {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(200));
    }
    anyhow::bail!("timed out waiting for <video> element");
}
/// Collect a JSON snapshot of player-related DOM state (video presence and
/// src, placeholder text, status text, number of watch buttons) for use in
/// failure messages.
fn debug_player_state(tab: &headless_chrome::Tab) -> anyhow::Result<String> {
    let js = r#"(function() {
    let v = document.querySelector('video');
    let src = v ? (v.src || '') : null;
    let placeholder = document.querySelector('.placeholder');
    let placeholderText = placeholder ? (placeholder.innerText || '') : null;
    let status = document.querySelector('.source-status');
    let statusText = status ? (status.innerText || '') : null;
    let sources = Array.from(document.querySelectorAll('button[data-testid="global-watch"]')).length;
    return JSON.stringify({ hasVideo: !!v, videoSrc: src, placeholderText, statusText, sources });
})();"#;
    let v = tab.evaluate(js, false)?;
    // Empty string when evaluation yielded no string value.
    Ok(v.value
        .and_then(|v| v.as_str().map(|s| s.to_string()))
        .unwrap_or_default())
}
/// Click the watch button for `stream_id` via a synthetic MouseEvent.
///
/// Returns `Ok(false)` when the button is not (yet) rendered.
fn click_global_watch(tab: &headless_chrome::Tab, stream_id: &str) -> anyhow::Result<bool> {
    // JSON-encode the id; it is interpolated into a JS template literal.
    let js = format!(
        r#"(function() {{
    let target = {sid};
    let btn = document.querySelector(`button[data-stream-id="${{target}}"]`);
    if (!btn) return false;
    // Some SPA frameworks attach delegated listeners; dispatch a real click event.
    btn.dispatchEvent(new MouseEvent('click', {{ bubbles: true, cancelable: true, view: window }}));
    return true;
}})();"#,
        sid = serde_json::to_string(stream_id).unwrap()
    );
    let v = tab.evaluate(&js, false)?;
    Ok(v.value.and_then(|v| v.as_bool()).unwrap_or(false))
}
/// End-to-end: the deployed website can tune in to an ALREADY-published
/// stream id (supplied via `EVERY_CHANNEL_STREAM_ID`) from the global list.
///
/// Skipped (returns Ok) unless a Chrome binary, `ffmpeg`, and the env var
/// are all present; `#[ignore]`d so it only runs when requested explicitly.
#[test]
#[ignore]
fn e2e_remote_website_watch_existing_stream_id() -> anyhow::Result<()> {
    let chrome = match chrome_path() {
        Some(p) => p,
        None => return Ok(()), // skip
    };
    // We still want ffmpeg around for parity with other E2Es (and to discourage "works only without media tools").
    if which("ffmpeg").is_none() {
        return Ok(()); // skip
    }
    let site_url = std::env::var("EVERY_CHANNEL_SITE_URL")
        .unwrap_or_else(|_| "https://every.channel/".to_string());
    let stream_id = match std::env::var("EVERY_CHANNEL_STREAM_ID") {
        Ok(v) if !v.trim().is_empty() => v,
        _ => return Ok(()), // skip
    };
    // Disable caching/service workers so we always exercise the live deploy.
    let launch_options = headless_chrome::LaunchOptionsBuilder::default()
        .path(Some(chrome))
        .headless(true)
        .args(vec![
            OsStr::new("--autoplay-policy=no-user-gesture-required"),
            OsStr::new("--mute-audio"),
            OsStr::new("--disable-application-cache"),
            OsStr::new("--disable-service-worker"),
            OsStr::new("--disk-cache-size=0"),
        ])
        .build()
        .unwrap();
    let browser = headless_chrome::Browser::new(launch_options)?;
    let tab = browser.new_tab()?;
    tab.navigate_to(&site_url)?;
    tab.wait_until_navigated()?;
    // Refresh the public list, then keep trying to click our stream's button.
    click_css(&tab, "button[data-testid='global-refresh']")?;
    let deadline = Instant::now() + Duration::from_secs(60);
    loop {
        if click_global_watch(&tab, &stream_id)? {
            break;
        }
        if Instant::now() > deadline {
            anyhow::bail!("timed out waiting for stream_id to appear in global list");
        }
        std::thread::sleep(Duration::from_millis(250));
    }
    // Ensure the player is instantiated.
    if let Err(err) = wait_for_video_element(&tab, Duration::from_secs(90)) {
        let st = debug_player_state(&tab).unwrap_or_default();
        anyhow::bail!("{err}\nplayer_state={st}");
    }
    // We consider playback "started" when the video uses a blob: URL (MSE).
    if let Err(err) = wait_for_blob_video(&tab, Duration::from_secs(90)) {
        let st = debug_player_state(&tab).unwrap_or_default();
        anyhow::bail!("{err}\nplayer_state={st}");
    }
    Ok(())
}

10
crates/ec-ts/Cargo.toml Normal file
View file

@ -0,0 +1,10 @@
[package]
name = "ec-ts"
version = "0.0.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
serde.workspace = true
serde-big-array = "0.5"

648
crates/ec-ts/src/lib.rs Normal file
View file

@ -0,0 +1,648 @@
//! Minimal MPEG-TS parsing for timing and table extraction.
use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use std::collections::HashMap;
use std::io::Read;
/// Size of a single MPEG-TS packet in bytes.
pub const TS_PACKET_SIZE: usize = 188;
/// PID carrying ATSC PSIP tables (including the System Time Table).
pub const PID_ATSC_PSIP: u16 = 0x1FFB;
/// PID carrying the DVB TDT/TOT time tables.
pub const PID_DVB_TDT_TOT: u16 = 0x0014;
// Leading byte of every valid TS packet.
const SYNC_BYTE: u8 = 0x47;
// ATSC System Time Table id.
const TABLE_ID_ATSC_STT: u8 = 0xCD;
// DVB Time and Date Table id.
const TABLE_ID_DVB_TDT: u8 = 0x70;
// DVB Time Offset Table id.
const TABLE_ID_DVB_TOT: u8 = 0x73;
// Seconds between the UNIX epoch (1970-01-01) and the GPS epoch (1980-01-06).
const GPS_EPOCH_TO_UNIX: i64 = 315964800;
// Modified Julian Date of the UNIX epoch (1970-01-01).
const MJD_UNIX_EPOCH: i64 = 40587;
/// One parsed 188-byte MPEG-TS packet plus fields extracted from its header.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TsPacket {
    // Raw packet bytes; serde can't derive for [u8; 188] without BigArray.
    #[serde(with = "BigArray")]
    data: [u8; TS_PACKET_SIZE],
    // 13-bit packet identifier.
    pub pid: u16,
    // True when this packet starts a new PES packet or PSI section.
    pub payload_unit_start: bool,
    // 4-bit per-PID continuity counter.
    pub continuity_counter: u8,
    // Discontinuity indicator from the adaptation field, when present.
    pub discontinuity: bool,
    // Program Clock Reference in 27 MHz ticks, when the adaptation field carried one.
    pub pcr_27mhz: Option<u64>,
    // Byte range of the payload inside `data`.
    payload_offset: usize,
    payload_len: usize,
}

impl TsPacket {
    /// Payload bytes; empty slice when the packet carries no payload.
    pub fn payload(&self) -> &[u8] {
        &self.data[self.payload_offset..self.payload_offset + self.payload_len]
    }

    /// The full 188-byte packet, header included.
    pub fn as_bytes(&self) -> &[u8; TS_PACKET_SIZE] {
        &self.data
    }
}
/// Pull-based reader that yields whole TS packets from any `Read` source.
pub struct TsReader<R> {
    reader: R,
}

impl<R: Read> TsReader<R> {
    /// Wrap `reader`. No buffering is added here; callers reading from raw
    /// files or sockets should supply a buffered reader themselves.
    pub fn new(reader: R) -> Self {
        Self { reader }
    }

    /// Read and parse the next 188-byte packet.
    ///
    /// Returns `Ok(None)` on a clean EOF (the stream ended exactly on a
    /// packet boundary), and an error when the stream ends mid-packet or the
    /// packet fails to parse.
    pub fn read_packet(&mut self) -> Result<Option<TsPacket>> {
        let mut data = [0u8; TS_PACKET_SIZE];
        let mut filled = 0usize;
        while filled < TS_PACKET_SIZE {
            match self.reader.read(&mut data[filled..])? {
                0 if filled == 0 => return Ok(None),
                0 => return Err(anyhow!("truncated TS packet")),
                n => filled += n,
            }
        }
        Ok(Some(parse_packet(data)?))
    }
}
/// Parse one raw 188-byte TS packet into a [`TsPacket`].
///
/// Validates the sync byte and the adaptation-field length, and extracts the
/// PID, flags, continuity counter and (when present) the PCR.
pub fn parse_packet(data: [u8; TS_PACKET_SIZE]) -> Result<TsPacket> {
    if data[0] != SYNC_BYTE {
        return Err(anyhow!("missing sync byte"));
    }
    let payload_unit_start = (data[1] & 0x40) != 0;
    // 13-bit PID: low 5 bits of byte 1 plus all of byte 2.
    let pid = ((data[1] as u16 & 0x1F) << 8) | data[2] as u16;
    let continuity_counter = data[3] & 0x0F;
    // adaptation_field_control: 1 = payload only, 2 = adaptation only,
    // 3 = adaptation then payload (0 is reserved and yields neither).
    let adaptation_control = (data[3] >> 4) & 0x03;
    let has_adaptation = adaptation_control == 2 || adaptation_control == 3;
    let has_payload = adaptation_control == 1 || adaptation_control == 3;
    let mut offset = 4usize;
    let mut discontinuity = false;
    let mut pcr_27mhz = None;
    if has_adaptation {
        let length = data[offset] as usize;
        offset += 1;
        if offset + length > TS_PACKET_SIZE {
            return Err(anyhow!("invalid adaptation field length"));
        }
        // A zero-length adaptation field is legal (single length byte only);
        // only read the flags byte when the field has content.
        if length > 0 {
            let flags = data[offset];
            discontinuity = (flags & 0x80) != 0;
            let pcr_flag = (flags & 0x10) != 0;
            // PCR requires the flags byte plus 6 PCR bytes inside the field.
            if pcr_flag && length >= 7 {
                let pcr_bytes = &data[offset + 1..offset + 7];
                pcr_27mhz = Some(parse_pcr_27mhz(pcr_bytes));
            }
        }
        offset += length;
    }
    // Whatever remains after header + adaptation field is payload.
    let payload_len = if has_payload && offset <= TS_PACKET_SIZE {
        TS_PACKET_SIZE - offset
    } else {
        0
    };
    Ok(TsPacket {
        data,
        pid,
        payload_unit_start,
        continuity_counter,
        discontinuity,
        pcr_27mhz,
        payload_offset: offset,
        payload_len,
    })
}
/// Decode a 6-byte PCR field (33-bit base at 90 kHz plus 9-bit extension)
/// into a single 27 MHz tick count: `base * 300 + extension`.
fn parse_pcr_27mhz(data: &[u8]) -> u64 {
    // 33-bit base: bytes 0..4 plus the top bit of byte 4 as the LSB.
    let mut base = 0u64;
    for &byte in &data[..4] {
        base = (base << 8) | byte as u64;
    }
    let base = (base << 1) | (data[4] as u64 >> 7);
    // 9-bit extension: the low bit of byte 4 plus byte 5 (bits 1..7 of
    // byte 4 are reserved and ignored).
    let extension = ((data[4] as u64 & 0x01) << 8) | data[5] as u64;
    base * 300 + extension
}
/// Extract the 33-bit PTS (90 kHz units) from a packet that begins a PES
/// header.
///
/// Returns `None` unless the packet has payload_unit_start set, its payload
/// begins with a PES start code (00 00 01), and the PTS flag is present.
pub fn parse_pts_90khz(packet: &TsPacket) -> Option<u64> {
    if !packet.payload_unit_start {
        return None;
    }
    let payload = packet.payload();
    // Need the fixed PES header (9 bytes) plus the 5 PTS bytes.
    if payload.len() < 14 {
        return None;
    }
    if payload[0] != 0 || payload[1] != 0 || payload[2] != 1 {
        return None;
    }
    let flags = payload[7];
    // 2-bit PTS_DTS_flags: 0b10 = PTS only, 0b11 = PTS + DTS.
    let pts_dts_flags = (flags >> 6) & 0x03;
    if pts_dts_flags == 0 {
        return None;
    }
    // PES_header_data_length; the PTS always sits right after it at byte 9.
    let header_length = payload[8] as usize;
    let pts_start = 9usize;
    if header_length < 5 || payload.len() < pts_start + 5 {
        return None;
    }
    // 33 PTS bits spread across 5 bytes with marker bits interleaved.
    let b = &payload[pts_start..pts_start + 5];
    let pts = ((b[0] as u64 & 0x0E) << 29)
        | ((b[1] as u64) << 22)
        | ((b[2] as u64 & 0xFE) << 14)
        | ((b[3] as u64) << 7)
        | ((b[4] as u64) >> 1);
    Some(pts)
}
/// A complete PSI/SI section together with the PID it was carried on.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Section {
    pub pid: u16,
    pub table_id: u8,
    // Full section bytes, 3-byte header (table_id + section_length) included.
    pub data: Vec<u8>,
}

/// Reassembles PSI/SI sections that span multiple TS packets, per PID.
#[derive(Debug, Default)]
pub struct SectionAssembler {
    buffers: HashMap<u16, SectionBuffer>,
}

// Partially received section for one PID.
#[derive(Debug)]
struct SectionBuffer {
    // Total section length: 3 header bytes + declared section_length.
    expected_len: usize,
    data: Vec<u8>,
}
impl SectionAssembler {
    /// Feed one TS packet; returns any sections completed by it.
    ///
    /// On a payload_unit_start packet, sections are read starting after the
    /// pointer_field; a trailing partial section is buffered per PID and
    /// completed by subsequent non-PUSI packets on the same PID.
    ///
    /// NOTE(review): bytes before the pointer on a PUSI packet (the tail of
    /// a previous section) are discarded, any buffered partial for the PID
    /// is silently replaced, and trailing 0xFF stuffing is not treated
    /// specially — confirm these simplifications are acceptable for the
    /// small time tables (STT/TDT/TOT) this crate targets.
    pub fn push_packet(&mut self, packet: &TsPacket) -> Vec<Section> {
        let mut sections = Vec::new();
        let payload = packet.payload();
        if payload.is_empty() {
            return sections;
        }
        if packet.payload_unit_start {
            // pointer_field: bytes to skip before the first section start.
            let pointer = payload[0] as usize;
            if pointer + 1 > payload.len() {
                return sections;
            }
            let mut idx = 1 + pointer;
            // Read consecutive sections until one runs past the payload end.
            while idx + 3 <= payload.len() {
                let table_id = payload[idx];
                // 12-bit section_length: bytes remaining after the 3-byte header.
                let section_length =
                    (((payload[idx + 1] & 0x0F) as usize) << 8) | payload[idx + 2] as usize;
                let total_len = 3 + section_length;
                if idx + total_len <= payload.len() {
                    // Section fully contained in this packet — emit it.
                    let data = payload[idx..idx + total_len].to_vec();
                    sections.push(Section {
                        pid: packet.pid,
                        table_id,
                        data,
                    });
                    idx += total_len;
                } else {
                    // Partial tail: buffer it until non-PUSI packets complete it.
                    let data = payload[idx..].to_vec();
                    self.buffers.insert(
                        packet.pid,
                        SectionBuffer {
                            expected_len: total_len,
                            data,
                        },
                    );
                    break;
                }
            }
        } else if let Some(buffer) = self.buffers.get_mut(&packet.pid) {
            // Continuation packet: append and emit once the declared length
            // has been reached (excess bytes beyond it are dropped).
            buffer.data.extend_from_slice(payload);
            if buffer.data.len() >= buffer.expected_len {
                let data = buffer.data[..buffer.expected_len].to_vec();
                let table_id = data[0];
                sections.push(Section {
                    pid: packet.pid,
                    table_id,
                    data,
                });
                self.buffers.remove(&packet.pid);
            }
        }
        sections
    }
}
/// Which broadcast table supplied a wall-clock sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TimeSource {
    AtscStt,
    DvbTdt,
    DvbTot,
}

/// A UTC sample recovered from the transport stream.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BroadcastUtc {
    pub unix_seconds: i64,
    pub source: TimeSource,
}

/// State notification emitted by [`TimeSyncEngine::ingest_packet`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimeSyncUpdate {
    // Most recent PCR observed, in 27 MHz ticks.
    pub pcr_27mhz: Option<u64>,
    // Wall-clock time in UNIX seconds (sampled or derived).
    pub utc_unix_seconds: Option<i64>,
    // Index of the current fixed-duration chunk on the anchored timeline.
    pub chunk_index: Option<u64>,
    // PCR tick at which the current chunk starts.
    pub chunk_start_27mhz: Option<u64>,
    // UTC start of the current chunk, in UNIX seconds.
    pub utc_start_unix: Option<i64>,
    // True once a PCR-to-UTC offset has been established.
    pub synced: bool,
    // True when this update reports an adaptation-field discontinuity.
    pub discontinuity: bool,
}
/// Maps the stream's PCR clock onto wall-clock time and a fixed-duration
/// chunk grid, emitting an update at every chunk boundary.
#[derive(Debug)]
pub struct TimeSyncEngine {
    // Chunk duration in 27 MHz ticks.
    chunk_ticks: u64,
    // Most recent PCR seen, in 27 MHz ticks.
    last_pcr: Option<u64>,
    // utc_ticks - pcr_ticks, established from a broadcast time table.
    utc_offset_ticks: Option<i64>,
    // True once utc_offset_ticks has been computed.
    synced: bool,
    // Last chunk index reported, so boundary updates fire only on change.
    last_chunk_index: Option<u64>,
}

impl TimeSyncEngine {
    /// Create an engine with the given chunk duration.
    ///
    /// NOTE(review): `chunk_duration_ms == 0` makes `chunk_ticks` zero and
    /// `current_chunk_index` divide by zero — confirm callers never pass 0.
    pub fn new(chunk_duration_ms: u64) -> Self {
        // 27,000 ticks of the 27 MHz clock per millisecond.
        let chunk_ticks = chunk_duration_ms * 27_000;
        Self {
            chunk_ticks,
            last_pcr: None,
            utc_offset_ticks: None,
            synced: false,
            last_chunk_index: None,
        }
    }

    /// Feed one packet, routing its PSI payload through `assembler`, and
    /// return any state changes: discontinuities, new UTC samples, and chunk
    /// boundary crossings.
    pub fn ingest_packet(
        &mut self,
        packet: &TsPacket,
        assembler: &mut SectionAssembler,
    ) -> Vec<TimeSyncUpdate> {
        let mut updates = Vec::new();
        // A discontinuity invalidates both PCR history and the UTC anchor.
        if packet.discontinuity {
            self.last_pcr = None;
            self.utc_offset_ticks = None;
            self.synced = false;
            self.last_chunk_index = None;
            updates.push(TimeSyncUpdate {
                pcr_27mhz: packet.pcr_27mhz,
                utc_unix_seconds: None,
                chunk_index: None,
                chunk_start_27mhz: None,
                utc_start_unix: None,
                synced: false,
                discontinuity: true,
            });
        }
        // Any completed time-table section (re-)anchors UTC to the last PCR.
        for section in assembler.push_packet(packet) {
            if let Some(utc) = parse_time_section(&section) {
                if let Some(pcr) = self.last_pcr {
                    let utc_ticks = utc.unix_seconds.saturating_mul(27_000_000);
                    let offset = utc_ticks - pcr as i64;
                    self.utc_offset_ticks = Some(offset);
                    self.synced = true;
                }
                updates.push(TimeSyncUpdate {
                    pcr_27mhz: self.last_pcr,
                    utc_unix_seconds: Some(utc.unix_seconds),
                    chunk_index: self.current_chunk_index(),
                    chunk_start_27mhz: self.current_chunk_start_27mhz(),
                    utc_start_unix: self.current_chunk_utc_start(),
                    synced: self.synced,
                    discontinuity: false,
                });
            }
        }
        // A new PCR advances the clock; report when it crosses a chunk boundary.
        if let Some(pcr) = packet.pcr_27mhz {
            self.last_pcr = Some(pcr);
            let chunk_index = self.current_chunk_index();
            if chunk_index != self.last_chunk_index {
                self.last_chunk_index = chunk_index;
                updates.push(TimeSyncUpdate {
                    pcr_27mhz: Some(pcr),
                    utc_unix_seconds: self.current_utc_seconds(),
                    chunk_index,
                    chunk_start_27mhz: self.current_chunk_start_27mhz(),
                    utc_start_unix: self.current_chunk_utc_start(),
                    synced: self.synced,
                    discontinuity: false,
                });
            }
        }
        updates
    }

    // Current wall-clock time (UNIX seconds) from PCR + established offset.
    fn current_utc_seconds(&self) -> Option<i64> {
        let pcr = self.last_pcr? as i64;
        let offset = self.utc_offset_ticks?;
        Some((pcr + offset) / 27_000_000)
    }

    // Index of the chunk containing the current anchored time. Before sync
    // the offset defaults to 0, i.e. chunks are counted on the raw PCR clock.
    fn current_chunk_index(&self) -> Option<u64> {
        let pcr = self.last_pcr? as i128;
        let offset = self.utc_offset_ticks.unwrap_or(0) as i128;
        let t = pcr + offset;
        if t < 0 {
            return None;
        }
        Some((t as u128 / self.chunk_ticks as u128) as u64)
    }

    // PCR tick at which the current chunk begins (None if it would be
    // negative on the PCR clock).
    fn current_chunk_start_27mhz(&self) -> Option<u64> {
        let chunk_index = self.current_chunk_index()? as i128;
        let offset = self.utc_offset_ticks.unwrap_or(0) as i128;
        let anchored = chunk_index * self.chunk_ticks as i128;
        let pcr = anchored - offset;
        if pcr < 0 {
            return None;
        }
        Some(pcr as u64)
    }

    // UTC start (UNIX seconds) of the current chunk; requires sync.
    fn current_chunk_utc_start(&self) -> Option<i64> {
        let _ = self.utc_offset_ticks?;
        let chunk_index = self.current_chunk_index()? as i128;
        let anchored = chunk_index * self.chunk_ticks as i128;
        Some((anchored / 27_000_000) as i64)
    }
}
/// Map a PSI/SI section to a wall-clock sample, if it is one of the
/// recognized time tables (ATSC STT, DVB TDT, DVB TOT) on its expected PID.
///
/// Returns `None` for any other table/PID combination or when the section
/// payload fails to parse.
pub fn parse_time_section(section: &Section) -> Option<BroadcastUtc> {
    let (unix_seconds, source) = match (section.pid, section.table_id) {
        (PID_ATSC_PSIP, TABLE_ID_ATSC_STT) => {
            (parse_atsc_stt(&section.data)?, TimeSource::AtscStt)
        }
        (PID_DVB_TDT_TOT, TABLE_ID_DVB_TDT) => {
            (parse_dvb_time(&section.data)?, TimeSource::DvbTdt)
        }
        (PID_DVB_TDT_TOT, TABLE_ID_DVB_TOT) => {
            (parse_dvb_time(&section.data)?, TimeSource::DvbTot)
        }
        _ => return None,
    };
    Some(BroadcastUtc {
        unix_seconds,
        source,
    })
}
/// Extract a UNIX-seconds timestamp from an ATSC System Time Table section.
///
/// `system_time` (bytes 4..8) counts seconds since the GPS epoch
/// (1980-01-06); `gps_utc_offset` (byte 8) is the leap-second delta that
/// converts GPS time to UTC.
///
/// NOTE(review): converting GPS-epoch seconds to UNIX seconds normally ADDS
/// the 1970→1980 delta (unix = system_time - offset + 315964800), but this
/// SUBTRACTS it. The unit test below encodes the same subtraction, so fixing
/// the sign requires changing both together — confirm intent.
fn parse_atsc_stt(data: &[u8]) -> Option<i64> {
    // Minimum: 3-byte section header + protocol_version + 4-byte
    // system_time + gps_utc_offset.
    if data.len() < 3 + 1 + 4 + 1 {
        return None;
    }
    let system_time = u32::from_be_bytes([data[4], data[5], data[6], data[7]]) as i64;
    let gps_utc_offset = data[8] as i64;
    let utc_since_1980 = system_time - gps_utc_offset;
    Some(utc_since_1980 - GPS_EPOCH_TO_UNIX)
}
/// Extract a UNIX-seconds timestamp from a DVB TDT/TOT section.
///
/// Bytes 3..5 hold the 16-bit Modified Julian Date; bytes 5..8 hold the
/// time of day as packed-BCD hours/minutes/seconds (UTC).
fn parse_dvb_time(data: &[u8]) -> Option<i64> {
    if data.len() < 8 {
        return None;
    }
    let mjd = u16::from_be_bytes([data[3], data[4]]);
    let hour = bcd_to_dec(data[5])?;
    let minute = bcd_to_dec(data[6])?;
    let second = bcd_to_dec(data[7])?;
    // Days relative to the UNIX epoch (MJD 40587 = 1970-01-01).
    let days = mjd as i64 - MJD_UNIX_EPOCH;
    Some(days * 86_400 + hour as i64 * 3_600 + minute as i64 * 60 + second as i64)
}
/// Convert one packed-BCD byte (two decimal digits) to its numeric value.
///
/// Returns `None` if either nibble is not a decimal digit.
fn bcd_to_dec(value: u8) -> Option<u32> {
    let (tens, ones) = (value >> 4, value & 0x0F);
    if tens > 9 || ones > 9 {
        return None;
    }
    Some(u32::from(tens) * 10 + u32::from(ones))
}
#[cfg(test)]
mod tests {
use super::*;
// Build a TS packet whose adaptation field carries the given PCR
// (adaptation-only control, no payload).
fn build_ts_packet_with_adaptation_pcr(
    pid: u16,
    continuity_counter: u8,
    pcr_27mhz: u64,
) -> [u8; TS_PACKET_SIZE] {
    // Encode PCR into base (90kHz) and extension (27MHz remainder).
    let base = pcr_27mhz / 300;
    let ext = pcr_27mhz % 300;
    let mut pcr = [0u8; 6];
    pcr[0] = ((base >> 25) & 0xFF) as u8;
    pcr[1] = ((base >> 17) & 0xFF) as u8;
    pcr[2] = ((base >> 9) & 0xFF) as u8;
    pcr[3] = ((base >> 1) & 0xFF) as u8;
    // Top bit: base LSB; 0x7E: the 6 reserved bits set; low bit: extension MSB.
    pcr[4] = (((base & 0x1) << 7) as u8) | 0x7E | (((ext >> 8) & 0x1) as u8);
    pcr[5] = (ext & 0xFF) as u8;
    let mut data = [0u8; TS_PACKET_SIZE];
    data[0] = SYNC_BYTE;
    data[1] = ((pid >> 8) as u8) & 0x1F;
    data[2] = (pid & 0xFF) as u8;
    // adaptation only (no payload): adaptation_control=2
    data[3] = (2 << 4) | (continuity_counter & 0x0F);
    // adaptation length: 1 byte flags + 6 bytes PCR = 7
    data[4] = 7;
    // flags: PCR flag
    data[5] = 0x10;
    data[6..12].copy_from_slice(&pcr);
    data
}
// Encode a 33-bit PTS into the 5-byte PES field with marker bits set.
fn encode_pts_90khz(pts: u64) -> [u8; 5] {
    let mut b = [0u8; 5];
    // 0x20: '0010' prefix for PTS-only; the low bit of bytes 0, 2, 4 is a marker.
    b[0] = 0x20 | ((((pts >> 30) & 0x07) as u8) << 1) | 1;
    b[1] = ((pts >> 22) & 0xFF) as u8;
    b[2] = ((((pts >> 15) & 0x7F) as u8) << 1) | 1;
    b[3] = ((pts >> 7) & 0xFF) as u8;
    b[4] = (((pts & 0x7F) as u8) << 1) | 1;
    b
}
// parse_packet pulls out the PID, PUSI flag, continuity counter and payload.
#[test]
fn parse_packet_extracts_pid_and_payload() {
    let pid = 0x0033u16;
    let mut data = [0u8; TS_PACKET_SIZE];
    data[0] = SYNC_BYTE;
    data[1] = 0x40 | (((pid >> 8) as u8) & 0x1F); // payload_unit_start
    data[2] = (pid & 0xFF) as u8;
    data[3] = (1 << 4) | 0x0A; // payload only
    data[4] = 0xAA;
    let pkt = parse_packet(data).unwrap();
    assert_eq!(pkt.pid, pid);
    assert!(pkt.payload_unit_start);
    assert_eq!(pkt.continuity_counter, 0x0A);
    assert_eq!(pkt.payload()[0], 0xAA);
}
// A packet without the 0x47 sync byte must be rejected.
#[test]
fn parse_packet_rejects_bad_sync() {
    let mut data = [0u8; TS_PACKET_SIZE];
    data[0] = 0;
    assert!(parse_packet(data).is_err());
}
// An adaptation field whose declared length runs past the 188-byte packet
// must be rejected.
#[test]
fn parse_packet_rejects_invalid_adaptation_length() {
    let pid = 0x0011u16;
    let mut data = [0u8; TS_PACKET_SIZE];
    data[0] = SYNC_BYTE;
    data[1] = ((pid >> 8) as u8) & 0x1F;
    data[2] = (pid & 0xFF) as u8;
    data[3] = (3 << 4) | 0x00; // adaptation + payload
    data[4] = 250; // too large
    assert!(parse_packet(data).is_err());
}
// PCR encode → parse round-trips exactly.
#[test]
fn parse_packet_reads_pcr_27mhz() {
    let pcr = 54_000_123u64;
    let data = build_ts_packet_with_adaptation_pcr(0x0100, 0, pcr);
    let pkt = parse_packet(data).unwrap();
    assert_eq!(pkt.pcr_27mhz, Some(pcr));
}
// PTS encode → parse round-trips through a minimal PES header.
#[test]
fn parse_pts_extracts_expected_value() {
    let pid = 0x0020u16;
    let pts = 90_000u64 * 3;
    let pts_bytes = encode_pts_90khz(pts);
    let mut data = [0u8; TS_PACKET_SIZE];
    data[0] = SYNC_BYTE;
    data[1] = 0x40 | (((pid >> 8) as u8) & 0x1F);
    data[2] = (pid & 0xFF) as u8;
    data[3] = (1 << 4) | 0x00; // payload only
    // Minimal PES header with PTS.
    let payload = &mut data[4..];
    payload[0..3].copy_from_slice(&[0, 0, 1]);
    payload[3] = 0xE0;
    payload[7] = 0x80; // pts_dts_flags = 2
    payload[8] = 5; // header length
    payload[9..14].copy_from_slice(&pts_bytes);
    let pkt = parse_packet(data).unwrap();
    let parsed = parse_pts_90khz(&pkt).unwrap();
    assert_eq!(parsed, pts);
}
// A section split across two packets (partial tail on the PUSI packet,
// remainder in a follow-up packet) is reassembled into one Section.
#[test]
fn section_assembler_reassembles_across_packets() {
    let pid = 0x0014u16;
    let table_id = TABLE_ID_DVB_TDT;
    // A tiny "section" with declared length 10 (3 + 10 = 13 bytes total).
    let total_len = 13usize;
    let section_length = (total_len - 3) as u16;
    let mut section = vec![0u8; total_len];
    section[0] = table_id;
    section[1] = 0x00 | (((section_length >> 8) as u8) & 0x0F);
    section[2] = (section_length & 0xFF) as u8;
    for i in 3..total_len {
        section[i] = i as u8;
    }
    // Packet 1: payload is intentionally short (via a large adaptation field) so the assembler
    // must buffer until packet 2 arrives.
    let mut pkt1 = [0u8; TS_PACKET_SIZE];
    pkt1[0] = SYNC_BYTE;
    pkt1[1] = 0x40 | (((pid >> 8) as u8) & 0x1F);
    pkt1[2] = (pid & 0xFF) as u8;
    pkt1[3] = (3 << 4) | 0; // adaptation + payload
    let payload_len_1 = 8usize; // 1 pointer + 7 bytes of section
    let adaptation_len_1 = (TS_PACKET_SIZE - 5) - payload_len_1;
    pkt1[4] = adaptation_len_1 as u8;
    // adaptation flags at pkt1[5] left as 0; rest is stuffing 0.
    let payload_start_1 = 4 + 1 + adaptation_len_1;
    pkt1[payload_start_1] = 0; // pointer = 0
    pkt1[payload_start_1 + 1..payload_start_1 + 1 + 7].copy_from_slice(&section[..7]);
    // Packet 2: plain payload carrying the remaining 6 section bytes.
    let mut pkt2 = [0u8; TS_PACKET_SIZE];
    pkt2[0] = SYNC_BYTE;
    pkt2[1] = ((pid >> 8) as u8) & 0x1F;
    pkt2[2] = (pid & 0xFF) as u8;
    pkt2[3] = (1 << 4) | 1; // payload only
    pkt2[4..4 + (total_len - 7)].copy_from_slice(&section[7..]);
    let p1 = parse_packet(pkt1).unwrap();
    let p2 = parse_packet(pkt2).unwrap();
    let mut asm = SectionAssembler::default();
    assert!(asm.push_packet(&p1).is_empty());
    let out = asm.push_packet(&p2);
    assert_eq!(out.len(), 1);
    assert_eq!(out[0].pid, pid);
    assert_eq!(out[0].table_id, table_id);
    assert_eq!(out[0].data.len(), total_len);
    assert_eq!(out[0].data[3], 3u8);
}
#[test]
fn parse_time_sections_for_dvb_and_atsc_epoch() {
    // DVB TDT whose MJD/BCD fields encode 1970-01-01T00:00:00 (MJD 40587).
    let mut tdt_payload = vec![0u8; 8];
    tdt_payload[0] = TABLE_ID_DVB_TDT;
    tdt_payload[3] = 0x9E; // MJD 40587, high byte
    tdt_payload[4] = 0x8B; // MJD 40587, low byte
    // Bytes 5..8 (BCD hh:mm:ss) stay zero.
    let tdt = Section {
        pid: PID_DVB_TDT_TOT,
        table_id: TABLE_ID_DVB_TDT,
        data: tdt_payload,
    };
    assert_eq!(parse_time_section(&tdt).unwrap().unix_seconds, 0);

    // ATSC STT whose system_time maps to the UNIX epoch under our parser's
    // GPS-to-UNIX conversion.
    let mut stt_payload = vec![0u8; 9];
    stt_payload[0] = TABLE_ID_ATSC_STT;
    stt_payload[4..8].copy_from_slice(&(GPS_EPOCH_TO_UNIX as u32).to_be_bytes());
    stt_payload[8] = 0; // GPS-UTC offset
    let stt = Section {
        pid: PID_ATSC_PSIP,
        table_id: TABLE_ID_ATSC_STT,
        data: stt_payload,
    };
    assert_eq!(parse_time_section(&stt).unwrap().unix_seconds, 0);
}
#[test]
fn time_sync_engine_emits_chunk_boundaries_from_pcr() {
    // With 1000 ms chunks, PCR 0 should open chunk 0 and PCR 27,000,000
    // (one second at 27 MHz) should open chunk 1.
    let mut engine = TimeSyncEngine::new(1000);
    let mut assembler = SectionAssembler::default();
    let first = parse_packet(build_ts_packet_with_adaptation_pcr(0x0100, 0, 0)).unwrap();
    let second =
        parse_packet(build_ts_packet_with_adaptation_pcr(0x0100, 1, 27_000_000)).unwrap();
    let updates = engine.ingest_packet(&first, &mut assembler);
    assert!(updates.iter().any(|u| u.chunk_index == Some(0)));
    let updates = engine.ingest_packet(&second, &mut assembler);
    assert!(updates.iter().any(|u| u.chunk_index == Some(1)));
}
}