every.channel: sanitized baseline

This commit is contained in:
every.channel 2026-02-15 16:17:27 -05:00
commit 897e556bea
No known key found for this signature in database
258 changed files with 74298 additions and 0 deletions

View file

@ -0,0 +1,5 @@
**
# The `**` above excludes everything from the build context by default;
# re-admit only the container sources we actually need.
!containers/
!containers/**

View file

@ -0,0 +1,19 @@
[package]
name = "ec-cf-bootstrap-api"
version = "0.0.0"
edition = "2021"
license = "AGPL-3.0-only"

# Minimal HTTP stack: axum on tokio, JSON via serde, request tracing via tower-http.
[dependencies]
anyhow = "1"
axum = { version = "0.7", features = ["json"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "signal"] }
tower-http = { version = "0.6", features = ["trace"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

# Keep this out of the workspace; it's built in a container image.
[workspace]

View file

@ -0,0 +1,18 @@
# Cloudflare Containers build: compile a small, portable Rust HTTP server.
# This container only serves the bootstrap /api/* endpoints used for WebRTC rendezvous.
FROM rust:1.86-bookworm AS build
WORKDIR /app
# NOTE(review): copying src immediately after the manifest means any source
# change invalidates this layer too, so the manifest copy does NOT actually
# prime a dependency cache; a stub-main pre-build would be needed for that.
COPY Cargo.toml /app/Cargo.toml
COPY src /app/src
RUN cargo build --release
# Runtime stage: slim Debian image carrying only the compiled binary.
FROM debian:bookworm-slim
WORKDIR /app
COPY --from=build /app/target/release/ec-cf-bootstrap-api /app/ec-cf-bootstrap-api
ENV RUST_LOG=info
EXPOSE 8080
CMD ["/app/ec-cf-bootstrap-api"]

View file

@ -0,0 +1,252 @@
use axum::{
extract::Query,
http::{HeaderMap, StatusCode},
response::IntoResponse,
routing::{get, post},
Json, Router,
};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
net::SocketAddr,
sync::Arc,
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tokio::sync::RwLock;
use tower_http::trace::TraceLayer;
/// A publisher's advertised stream: the WebRTC offer plus display metadata.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct DirectoryEntry {
    // Publisher-chosen identifier; also the map key.
    stream_id: String,
    title: String,
    // Opaque WebRTC offer blob; this server never inspects it.
    offer: String,
    // Last announce time, ms since the Unix epoch.
    updated_ms: u64,
    // Absolute expiry, ms since the Unix epoch.
    expires_ms: u64,
}
/// A viewer's WebRTC answer awaiting one-shot pickup via GET /api/answer.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct AnswerEntry {
    stream_id: String,
    answer: String,
    updated_ms: u64,
    expires_ms: u64,
}
/// Response body for GET /api/directory.
#[derive(Clone, Debug, Serialize)]
struct DirectoryList {
    now_ms: u64,
    entries: Vec<DirectoryEntry>,
}
/// Response body for GET /api/health.
#[derive(Clone, Debug, Serialize)]
struct HealthResp {
    ok: bool,
}
/// Request body for POST /api/announce.
#[derive(Clone, Debug, Deserialize)]
struct AnnounceReq {
    stream_id: String,
    title: String,
    offer: String,
    // Optional requested absolute expiry; the handler clamps the derived TTL.
    expires_ms: Option<u64>,
}
/// Response body for POST /api/announce.
#[derive(Clone, Debug, Serialize)]
struct AnnounceResp {
    ok: bool,
    ttl_ms: u64,
    entry: DirectoryEntry,
}
/// Request body for POST /api/answer.
#[derive(Clone, Debug, Deserialize)]
struct AnswerPostReq {
    stream_id: String,
    answer: String,
}
/// Query parameters for GET /api/answer.
#[derive(Clone, Debug, Deserialize)]
struct AnswerGetReq {
    stream_id: String,
}
/// In-memory rendezvous tables shared by all handlers behind an RwLock.
#[derive(Default)]
struct State {
    // Live announcements keyed by stream_id.
    entries: HashMap<String, DirectoryEntry>,
    // Pending answers keyed by stream_id.
    answers: HashMap<String, AnswerEntry>,
}
/// Current wall-clock time as milliseconds since the Unix epoch.
/// Reads 0 if the system clock is somehow before the epoch.
fn now_ms() -> u64 {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    since_epoch.as_millis() as u64
}
/// Truncate `s` to at most `max_len` bytes without splitting a UTF-8 character.
///
/// `String::truncate` panics when the cut position is not a character
/// boundary, so back the cut up to the nearest boundary first. The result may
/// therefore be slightly shorter than `max_len` bytes for multi-byte text.
fn clamp_str(mut s: String, max_len: usize) -> String {
    if s.len() <= max_len {
        return s;
    }
    // Walk back to a char boundary; is_char_boundary(0) is always true,
    // so this loop terminates.
    let mut cut = max_len;
    while !s.is_char_boundary(cut) {
        cut -= 1;
    }
    s.truncate(cut);
    s
}
/// Headers shared by every API response: JSON content type and no caching.
fn json_headers() -> HeaderMap {
    let pairs = [
        ("content-type", "application/json; charset=utf-8"),
        ("cache-control", "no-store"),
    ];
    let mut headers = HeaderMap::new();
    for (name, value) in pairs {
        headers.insert(name, value.parse().unwrap());
    }
    headers
}
fn prune_state(state: &mut State, now: u64) {
state.entries.retain(|_, v| v.expires_ms > now);
state.answers.retain(|_, v| v.expires_ms > now);
// Cap growth defensively. This is not spam-resistant; it's a bootstrap rendezvous.
if state.entries.len() > 200 {
let mut items = state.entries.values().cloned().collect::<Vec<_>>();
items.sort_by_key(|e| std::cmp::Reverse(e.updated_ms));
items.truncate(200);
state.entries = items.into_iter().map(|e| (e.stream_id.clone(), e)).collect();
}
if state.answers.len() > 500 {
let mut items = state.answers.values().cloned().collect::<Vec<_>>();
items.sort_by_key(|e| std::cmp::Reverse(e.updated_ms));
items.truncate(500);
state.answers = items.into_iter().map(|e| (e.stream_id.clone(), e)).collect();
}
}
/// GET /api/health — trivial liveness probe.
async fn health() -> impl IntoResponse {
    let body = HealthResp { ok: true };
    (json_headers(), Json(body))
}
async fn directory(state: axum::extract::State<Arc<RwLock<State>>>) -> impl IntoResponse {
let now = now_ms();
let mut guard = state.write().await;
prune_state(&mut guard, now);
let mut entries = guard.entries.values().cloned().collect::<Vec<_>>();
entries.sort_by_key(|e| std::cmp::Reverse(e.updated_ms));
(json_headers(), Json(DirectoryList { now_ms: now, entries }))
}
async fn announce(
state: axum::extract::State<Arc<RwLock<State>>>,
Json(body): Json<AnnounceReq>,
) -> impl IntoResponse {
let now = now_ms();
if body.stream_id.is_empty() || body.title.is_empty() || body.offer.is_empty() {
let resp = serde_json::json!({ "error": "missing stream_id/title/offer" });
return (StatusCode::BAD_REQUEST, json_headers(), Json(resp)).into_response();
}
if body.offer.len() > 64_000 {
let resp = serde_json::json!({ "error": "offer too large" });
return (StatusCode::PAYLOAD_TOO_LARGE, json_headers(), Json(resp)).into_response();
}
let requested_expires = body.expires_ms.unwrap_or(now + 20_000);
let requested_ttl = requested_expires.saturating_sub(now);
let ttl_ms = requested_ttl.clamp(5_000, 60_000);
let entry = DirectoryEntry {
stream_id: clamp_str(body.stream_id, 256),
title: clamp_str(body.title, 128),
offer: body.offer,
updated_ms: now,
expires_ms: now + ttl_ms,
};
let mut guard = state.write().await;
prune_state(&mut guard, now);
guard.entries.insert(entry.stream_id.clone(), entry.clone());
(
json_headers(),
Json(AnnounceResp {
ok: true,
ttl_ms,
entry,
}),
)
.into_response()
}
async fn post_answer(
state: axum::extract::State<Arc<RwLock<State>>>,
Json(body): Json<AnswerPostReq>,
) -> impl IntoResponse {
let now = now_ms();
if body.stream_id.is_empty() || body.answer.is_empty() {
let resp = serde_json::json!({ "error": "missing stream_id/answer" });
return (StatusCode::BAD_REQUEST, json_headers(), Json(resp)).into_response();
}
if body.answer.len() > 64_000 {
let resp = serde_json::json!({ "error": "answer too large" });
return (StatusCode::PAYLOAD_TOO_LARGE, json_headers(), Json(resp)).into_response();
}
let entry = AnswerEntry {
stream_id: clamp_str(body.stream_id, 256),
answer: body.answer,
updated_ms: now,
expires_ms: now + 2 * 60_000,
};
let mut guard = state.write().await;
prune_state(&mut guard, now);
guard.answers.insert(entry.stream_id.clone(), entry);
(json_headers(), Json(serde_json::json!({ "ok": true }))).into_response()
}
/// GET /api/answer?stream_id=… — one-shot pickup of a stored answer.
///
/// The first successful reader removes the answer from the table.
async fn get_answer(
    state: axum::extract::State<Arc<RwLock<State>>>,
    Query(q): Query<AnswerGetReq>,
) -> impl IntoResponse {
    let now = now_ms();
    if q.stream_id.is_empty() {
        let resp = serde_json::json!({ "error": "missing stream_id" });
        return (StatusCode::BAD_REQUEST, json_headers(), Json(resp)).into_response();
    }
    let mut guard = state.write().await;
    prune_state(&mut guard, now);
    // One-shot: first reader consumes.
    match guard.answers.remove(&q.stream_id) {
        Some(answer) => (json_headers(), Json(answer)).into_response(),
        None => {
            let resp = serde_json::json!({ "error": "not found" });
            (StatusCode::NOT_FOUND, json_headers(), Json(resp)).into_response()
        }
    }
}
/// Entry point: install tracing, build the router, and serve on 0.0.0.0:8080
/// until Ctrl-C triggers a graceful shutdown.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::fmt()
        .with_env_filter(
            // Honor RUST_LOG when set; otherwise default to info-level logs.
            std::env::var("RUST_LOG").unwrap_or_else(|_| "info,tower_http=info".to_string()),
        )
        .init();
    // All handlers share one in-memory rendezvous table behind an RwLock.
    let state = Arc::new(RwLock::new(State::default()));
    let app = Router::new()
        .route("/api/health", get(health))
        .route("/api/directory", get(directory))
        .route("/api/announce", post(announce))
        // /api/answer: POST publishes an answer, GET consumes it (one-shot).
        .route("/api/answer", post(post_answer).get(get_answer))
        .with_state(state)
        .layer(TraceLayer::new_for_http());
    let addr: SocketAddr = "0.0.0.0:8080".parse().unwrap();
    tracing::info!("ec-cf-bootstrap-api listening on {}", addr);
    let listener = tokio::net::TcpListener::bind(addr).await?;
    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await?;
    Ok(())
}
/// Resolve once Ctrl-C (SIGINT) arrives. If the signal handler cannot be
/// installed, resolve immediately so the server still shuts down.
async fn shutdown_signal() {
    let _ignored = tokio::signal::ctrl_c().await;
}

1519
deploy/cloudflare-worker/package-lock.json generated Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,15 @@
{
"name": "every-channel-site",
"private": true,
"type": "module",
"dependencies": {},
"devDependencies": {
"typescript": "5.9.3",
"wrangler": "^4.63.0"
},
"scripts": {
"cf-typegen": "wrangler types",
"dev": "wrangler dev --local --port 8787",
"deploy": "wrangler deploy"
}
}

View file

@ -0,0 +1,575 @@
/**
 * Serialize `data` as a JSON Response. Headers from `init` win over the
 * default content-type, matching object-spread precedence.
 */
function json(data: unknown, init?: ResponseInit): Response {
  const headers = {
    "content-type": "application/json; charset=utf-8",
    ...(init?.headers ?? {}),
  };
  return new Response(JSON.stringify(data), { ...init, headers });
}
/**
 * Like `json`, but marks the response uncacheable. Caller-supplied headers
 * still take precedence over the cache-control default.
 */
function jsonNoStore(data: unknown, init?: ResponseInit): Response {
  const headers = { "cache-control": "no-store", ...(init?.headers ?? {}) };
  return json(data, { ...init, headers });
}
/**
 * Compute HMAC(secret, msg) with the given hash and return it base64-encoded.
 * Used for TURN REST-style ephemeral credentials.
 */
async function hmacBase64(
  secret: string,
  msg: string,
  hash: "SHA-1" | "SHA-256",
): Promise<string> {
  const enc = new TextEncoder();
  const key = await crypto.subtle.importKey(
    "raw",
    enc.encode(secret),
    { name: "HMAC", hash: { name: hash } },
    false,
    ["sign"],
  );
  const mac = new Uint8Array(await crypto.subtle.sign("HMAC", key, enc.encode(msg)));
  // btoa wants a binary string, so widen each byte to a char first.
  let raw = "";
  for (let i = 0; i < mac.length; i++) raw += String.fromCharCode(mac[i]);
  return btoa(raw);
}
/**
 * GET /api/turn — ICE server bootstrap for WebRTC clients (browser + native).
 *
 * Always provides STUN. TURN is optional and requires a shared secret: when
 * EC_TURN_SHARED_SECRET is set, TURN URLs are added with TURN-REST-style
 * short-lived credentials (username = "<expiry>:<prefix>", credential =
 * base64(HMAC(secret, username))). Response shape is compatible with
 * `just-webrtc` types.
 */
async function handleTurn(env: Env): Promise<Response> {
  const ice_servers: Array<{
    urls: string[];
    username: string;
    credential: string;
    // `just-webrtc` uses serde defaults on enum variants (Password/Oauth).
    credential_type: "Password" | "Oauth";
  }> = [];
  // STUN needs no credentials.
  ice_servers.push({
    urls: [
      "stun:stun.cloudflare.com:3478",
      "stun:stun.l.google.com:19302",
      "stun:stun1.l.google.com:19302",
    ],
    username: "",
    credential: "",
    credential_type: "Password",
  });
  const shared = env.EC_TURN_SHARED_SECRET?.trim();
  if (shared) {
    // TTL defaults to 1 hour and is floored at 60 seconds.
    const ttlSec = Number(env.EC_TURN_TTL_SECS ?? "3600") || 3600;
    const exp = Math.floor(Date.now() / 1000) + Math.max(60, ttlSec);
    const prefix = (env.EC_TURN_USER_PREFIX ?? "every.channel").trim();
    // TURN REST convention: the username embeds the expiry timestamp.
    const username = `${exp}:${prefix}`;
    const hash = (env.EC_TURN_HMAC ?? "sha1").toLowerCase() === "sha256" ? "SHA-256" : "SHA-1";
    const credential = await hmacBase64(shared, username, hash);
    const host = (env.EC_TURN_HOST ?? "turn.cloudflare.com").trim();
    ice_servers.push({
      urls: [
        `turn:${host}:3478?transport=udp`,
        `turn:${host}:3478?transport=tcp`,
        `turns:${host}:5349?transport=tcp`,
      ],
      username,
      credential,
      credential_type: "Password",
    });
  }
  return json({ ice_servers });
}
/**
 * Worker entry point: routes /api/* traffic to Durable Objects and everything
 * else to the static-assets binding with an SPA fallback.
 */
export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    const url = new URL(request.url);
    // ICE bootstrap for WebRTC clients (browser + native).
    if (url.pathname === "/api/turn") {
      return handleTurn(env);
    }
    // Stream relay (one-to-many) for CMAF object frames transported via direct-wire chunks.
    // This is a bootstrap relay so browsers can watch live streams without direct P2P.
    if (url.pathname === "/api/stream/ws") {
      const stream_id = url.searchParams.get("stream_id") ?? "";
      if (!stream_id) {
        return jsonNoStore({ error: "missing stream_id" }, { status: 400 });
      }
      // One relay Durable Object per stream id.
      const id = env.EC_STREAM.idFromName(stream_id);
      const stub = env.EC_STREAM.get(id);
      return stub.fetch(request);
    }
    // Minimal bootstrap API: proxy /api/* to a single durable object instance ("global").
    // This exists only to rendezvous WebRTC offers/answers and list "live" entries.
    if (url.pathname.startsWith("/api/")) {
      const id = env.EC_API.idFromName("global");
      const stub = env.EC_API.get(id);
      return stub.fetch(request);
    }
    // Serve static assets from the Worker Assets binding.
    // SPA fallback: unknown paths serve the app shell (`/index.html`).
    const assets = (env as unknown as { ASSETS?: Fetcher }).ASSETS;
    if (!assets || typeof (assets as any).fetch !== "function") {
      return new Response("Assets binding not configured", { status: 500 });
    }
    const res = await assets.fetch(request);
    if (res.status !== 404) return res;
    url.pathname = "/index.html";
    return assets.fetch(new Request(url.toString(), request));
  },
};
// Bindings and configuration available to this Worker.
interface Env {
  // Static assets (the built UI) served for non-API routes.
  ASSETS: Fetcher;
  // Durable Object namespaces: bootstrap API and per-stream relay.
  EC_API: DurableObjectNamespace;
  EC_STREAM: DurableObjectNamespace;
  // Optional TURN REST shared secret (Worker secret). When set, `/api/turn` includes TURN URLs
  // with short-lived credentials derived from this shared secret.
  EC_TURN_SHARED_SECRET?: string;
  EC_TURN_TTL_SECS?: string;
  EC_TURN_USER_PREFIX?: string;
  EC_TURN_HOST?: string;
  EC_TURN_HMAC?: string;
}
// A publisher's advertised stream: WebRTC offer plus display metadata.
// Timestamps are ms since the Unix epoch.
type DirectoryEntry = {
  stream_id: string;
  title: string;
  offer: string;
  updated_ms: number;
  expires_ms: number;
};
// A viewer's stored WebRTC answer, consumed once via GET /api/answer.
type AnswerEntry = {
  stream_id: string;
  answer: string;
  updated_ms: number;
  expires_ms: number;
};
// Payload of GET /api/directory.
type DirectoryList = {
  now_ms: number;
  entries: DirectoryEntry[];
};
/** Wall-clock milliseconds since the Unix epoch. */
function nowMs(): number {
  const t = Date.now();
  return t;
}
/** Cap a string at `maxLen` UTF-16 code units. */
function clampStr(s: string, maxLen: number): string {
  return s.length > maxLen ? s.slice(0, maxLen) : s;
}
/** Storage key for a directory entry (namespaced under "e:"). */
function entryKey(streamId: string): string {
  return "e:" + streamId;
}
/** Storage key for an answer record (namespaced under "a:"). */
function answerKey(streamId: string): string {
  return "a:" + streamId;
}
/**
 * List every key/value under `prefix` from Durable Object storage.
 *
 * `DurableObjectStorage.list()` returns a `Map` and has no KV-style cursor,
 * so the previous `page.cursor` pagination never advanced and silently
 * stopped after the first 256 items. Paginate with `startAfter` on the last
 * key of each page instead. Pages stay small; this is a bootstrap
 * rendezvous, not a global index.
 */
async function listWithPrefix<T>(
  storage: DurableObjectStorage,
  prefix: string,
): Promise<Array<[string, T]>> {
  const out: Array<[string, T]> = [];
  let startAfter: string | undefined = undefined;
  for (;;) {
    const page = await storage.list<T>({ prefix, startAfter, limit: 256 });
    if (page.size === 0) break;
    let lastKey: string | undefined;
    for (const [k, v] of page) {
      out.push([k, v]);
      lastKey = k;
    }
    // A short page means we've seen everything under the prefix.
    if (page.size < 256 || lastKey === undefined) break;
    startAfter = lastKey;
  }
  return out;
}
/**
 * Delete expired directory/answer records, then cap each namespace so
 * storage cannot grow without bound. Not spam-resistant — this is a
 * bootstrap rendezvous.
 */
async function pruneAndCap(
  storage: DurableObjectStorage,
  now: number,
): Promise<void> {
  const entries = await listWithPrefix<DirectoryEntry>(storage, "e:");
  const answers = await listWithPrefix<AnswerEntry>(storage, "a:");
  const doomed: string[] = [];
  for (const [key, entry] of entries) {
    if (entry.expires_ms <= now) doomed.push(key);
  }
  for (const [key, ans] of answers) {
    if (ans.expires_ms <= now) doomed.push(key);
  }
  if (entries.length > 200) {
    // Keep only the 200 most recently updated entries.
    const newest = entries
      .map(([, v]) => v)
      .sort((x, y) => y.updated_ms - x.updated_ms)
      .slice(0, 200);
    const keep = new Set(newest.map((e) => entryKey(e.stream_id)));
    for (const [key] of entries) {
      if (!keep.has(key)) doomed.push(key);
    }
  }
  if (answers.length > 500) {
    const newest = answers
      .map(([, v]) => v)
      .sort((x, y) => y.updated_ms - x.updated_ms)
      .slice(0, 500);
    const keep = new Set(newest.map((a) => answerKey(a.stream_id)));
    for (const [key] of answers) {
      if (!keep.has(key)) doomed.push(key);
    }
  }
  // Delete in chunks to avoid oversized storage requests.
  for (let i = 0; i < doomed.length; i += 128) {
    await storage.delete(doomed.slice(i, i + 128));
  }
}
// Body of POST /api/announce.
type AnnounceReq = {
  stream_id: string;
  title: string;
  offer: string;
  // Optional requested absolute expiry (ms since epoch); clamped server-side.
  expires_ms?: number;
};
// Body of POST /api/answer.
type AnswerPostReq = {
  stream_id: string;
  answer: string;
};
// Minimal bootstrap API Durable Object. The binding name is historical; we keep it stable so
// existing migrations and wrangler config remain valid while removing Cloudflare Containers.
export class EcApiContainer implements DurableObject {
  private state: DurableObjectState;
  constructor(state: DurableObjectState) {
    this.state = state;
  }
  /**
   * Handle all /api/* routes proxied from the Worker:
   *   GET  /api/health    — liveness probe
   *   GET  /api/directory — list unexpired entries, newest first
   *   POST /api/announce  — upsert a publisher's offer (TTL clamped to 5–60s)
   *   POST /api/answer    — store a viewer's answer (2-minute TTL)
   *   GET  /api/answer    — one-shot consume of a stored answer
   */
  async fetch(request: Request): Promise<Response> {
    const url = new URL(request.url);
    const now = nowMs();
    // Best-effort pruning on any request. NOTE(review): this lists both key
    // prefixes every call — fine at bootstrap scale, costly under real load.
    await pruneAndCap(this.state.storage, now);
    if (url.pathname === "/api/health") {
      return jsonNoStore({ ok: true });
    }
    if (url.pathname === "/api/directory") {
      const items = await listWithPrefix<DirectoryEntry>(this.state.storage, "e:");
      // Filter again by expiry in case pruning raced, newest first.
      const entries = items
        .map(([, v]) => v)
        .filter((v) => v.expires_ms > now)
        .sort((a, b) => b.updated_ms - a.updated_ms);
      const resp: DirectoryList = { now_ms: now, entries };
      return jsonNoStore(resp);
    }
    if (url.pathname === "/api/announce") {
      if (request.method !== "POST") {
        return jsonNoStore({ error: "method not allowed" }, { status: 405 });
      }
      let body: AnnounceReq;
      try {
        body = (await request.json()) as AnnounceReq;
      } catch {
        return jsonNoStore({ error: "invalid json" }, { status: 400 });
      }
      if (!body.stream_id || !body.title || !body.offer) {
        return jsonNoStore({ error: "missing stream_id/title/offer" }, { status: 400 });
      }
      if (body.offer.length > 64_000) {
        return jsonNoStore({ error: "offer too large" }, { status: 413 });
      }
      // Clamp the requested lifetime into [5s, 60s]; default is 20s.
      const requestedExpires = body.expires_ms ?? now + 20_000;
      const requestedTtl = Math.max(0, requestedExpires - now);
      const ttlMs = Math.min(60_000, Math.max(5_000, requestedTtl));
      const entry: DirectoryEntry = {
        stream_id: clampStr(body.stream_id, 256),
        title: clampStr(body.title, 128),
        offer: body.offer,
        updated_ms: now,
        expires_ms: now + ttlMs,
      };
      await this.state.storage.put(entryKey(entry.stream_id), entry);
      return jsonNoStore({ ok: true, ttl_ms: ttlMs, entry });
    }
    if (url.pathname === "/api/answer") {
      if (request.method === "POST") {
        let body: AnswerPostReq;
        try {
          body = (await request.json()) as AnswerPostReq;
        } catch {
          return jsonNoStore({ error: "invalid json" }, { status: 400 });
        }
        if (!body.stream_id || !body.answer) {
          return jsonNoStore({ error: "missing stream_id/answer" }, { status: 400 });
        }
        if (body.answer.length > 64_000) {
          return jsonNoStore({ error: "answer too large" }, { status: 413 });
        }
        const entry: AnswerEntry = {
          stream_id: clampStr(body.stream_id, 256),
          answer: body.answer,
          updated_ms: now,
          // Short-lived: two minutes.
          expires_ms: now + 2 * 60_000,
        };
        await this.state.storage.put(answerKey(entry.stream_id), entry);
        return jsonNoStore({ ok: true });
      }
      if (request.method === "GET") {
        const streamId = url.searchParams.get("stream_id") ?? "";
        if (!streamId) {
          return jsonNoStore({ error: "missing stream_id" }, { status: 400 });
        }
        const key = answerKey(streamId);
        const ans = await this.state.storage.get<AnswerEntry>(key);
        if (!ans || ans.expires_ms <= now) {
          // Clean up an expired record on the way out.
          await this.state.storage.delete(key);
          return jsonNoStore({ error: "not found" }, { status: 404 });
        }
        // One-shot: first reader consumes.
        await this.state.storage.delete(key);
        return jsonNoStore(ans);
      }
      return jsonNoStore({ error: "method not allowed" }, { status: 405 });
    }
    return jsonNoStore({ error: "not found" }, { status: 404 });
  }
}
// Historical class referenced by older migrations. It is not bound anymore,
// but exporting it keeps workerd/wrangler happy if migrations mention it.
export class DirectoryDO implements DurableObject {
  async fetch(): Promise<Response> {
    const status = 410;
    return new Response("gone", { status });
  }
}
// Direct-wire message tags (first byte of each binary websocket message).
const DIRECT_WIRE_TAG_FRAME = 0x00; // payload is one complete object frame
const DIRECT_WIRE_TAG_STREAM = 0x01; // payload is a chunk of [u32be len][frame] records
const DIRECT_WIRE_TAG_PING = 0x02; // keepalive; payload ignored
// Outgoing stream payloads are split into 16 KiB chunks.
const DIRECT_WIRE_CHUNK_BYTES = 16 * 1024;
// Timing metadata decoded from a frame's JSON header.
type TimingMeta = {
  // Sequential chunk number; 0 marks the init frame (see bufferFrame).
  chunk_index: number;
};
type ObjectMeta = {
  content_type: string;
  timing?: TimingMeta;
};
// One-to-many fanout for direct-wire message chunks. Publisher sends direct-wire messages,
// subscribers receive the same stream, plus a buffered init + recent segments upon join.
export class StreamRelayDO implements DurableObject {
  private publisher: WebSocket | null = null;
  private subs = new Set<WebSocket>();
  // Reassemble publisher STREAM chunks into full object frames for buffering.
  private buf = new Uint8Array(0);
  // Byte length of the frame currently being reassembled; null between frames.
  private want: number | null = null;
  // Frame with chunk_index 0; replayed first to late joiners.
  private initFrame: Uint8Array | null = null;
  // Most recent frames keyed by chunk_index, capped at `maxSegments`.
  private segFrames = new Map<number, Uint8Array>();
  private maxSegments = 12;
  /**
   * Upgrade to a websocket. `?role=pub` registers the publisher (replacing
   * any existing one); any other role joins as a subscriber and immediately
   * receives the buffered init frame plus recent segments in index order.
   */
  async fetch(request: Request): Promise<Response> {
    const upgrade = request.headers.get("Upgrade")?.toLowerCase();
    if (upgrade !== "websocket") {
      return json({ error: "expected websocket" }, { status: 400 });
    }
    const url = new URL(request.url);
    const role = (url.searchParams.get("role") ?? "sub").toLowerCase();
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const pair = new (WebSocketPair as any)();
    const client: WebSocket = pair[0];
    const server: WebSocket = pair[1];
    server.accept();
    if (role === "pub") {
      if (this.publisher) {
        try {
          this.publisher.close(1013, "publisher replaced");
        } catch {}
      }
      this.publisher = server;
    } else {
      this.subs.add(server);
      // Best-effort fast start: send init + last segments.
      try {
        if (this.initFrame) {
          this.sendFrame(server, this.initFrame);
        }
        const idxs = Array.from(this.segFrames.keys()).sort((a, b) => a - b);
        for (const idx of idxs) {
          const fr = this.segFrames.get(idx);
          if (fr) this.sendFrame(server, fr);
        }
      } catch {}
    }
    server.addEventListener("message", (evt: MessageEvent) => {
      const data = evt.data;
      if (!(data instanceof ArrayBuffer)) {
        return;
      }
      const msg = new Uint8Array(data);
      if (role === "pub") {
        // Fanout immediately (no buffering delay).
        for (const sub of this.subs) {
          try {
            sub.send(data);
          } catch {}
        }
        // Also decode for buffered join.
        this.handlePublisherMsg(msg);
      } else {
        // Subscribers can send pings; ignore.
        if (msg.length > 0 && msg[0] === DIRECT_WIRE_TAG_PING) {
          return;
        }
      }
    });
    server.addEventListener("close", () => {
      if (server === this.publisher) {
        this.publisher = null;
        // Keep buffered init/segments; publisher may reconnect soon.
      }
      this.subs.delete(server);
    });
    server.addEventListener("error", () => {
      if (server === this.publisher) {
        this.publisher = null;
      }
      this.subs.delete(server);
    });
    return new Response(null, { status: 101, webSocket: client });
  }
  /**
   * Decode one publisher message for the join buffer. FRAME messages carry a
   * complete frame; STREAM messages carry chunks of `[u32be len][frame]`
   * records reassembled here. Unknown tags are treated as bare frames
   * (legacy publishers). The `frame` bytes are `encode_object_frame(meta, data)`.
   */
  private handlePublisherMsg(msg: Uint8Array) {
    if (msg.length === 0) return;
    const tag = msg[0];
    if (tag === DIRECT_WIRE_TAG_PING) return;
    if (tag === DIRECT_WIRE_TAG_FRAME) {
      this.bufferFrame(msg.subarray(1));
      return;
    }
    if (tag !== DIRECT_WIRE_TAG_STREAM) {
      // Legacy: assume the whole thing is a frame.
      this.bufferFrame(msg);
      return;
    }
    // Append to reassembly buffer, then pull as many framed payloads as possible.
    this.buf = concatU8(this.buf, msg.subarray(1));
    while (true) {
      // Bound memory even while a frame is still incomplete. The previous
      // check only ran after a frame completed, so a junk or bogus length
      // prefix could grow the buffer without limit.
      if (this.buf.length > 4 * 1024 * 1024) {
        this.buf = new Uint8Array(0);
        this.want = null;
        return;
      }
      if (this.want === null) {
        if (this.buf.length < 4) return;
        // `x << 24` is a *signed* 32-bit op in JS; `>>> 0` keeps the decoded
        // length unsigned so a high first byte can't yield a negative size.
        this.want =
          ((this.buf[0] << 24) | (this.buf[1] << 16) | (this.buf[2] << 8) | this.buf[3]) >>> 0;
        this.buf = this.buf.subarray(4);
      }
      const want = this.want ?? 0;
      if (this.buf.length < want) return;
      const frame = this.buf.subarray(0, want);
      this.buf = this.buf.subarray(want);
      this.want = null;
      this.bufferFrame(frame);
    }
  }
  /**
   * Record a frame for fast-start replay: chunk_index 0 becomes the init
   * frame, others rotate through a small window of recent segments. Frames
   * without a numeric chunk_index are not buffered.
   */
  private bufferFrame(frame: Uint8Array) {
    const meta = tryDecodeObjectMeta(frame);
    const idx = meta?.timing?.chunk_index;
    if (typeof idx !== "number") return;
    if (idx === 0) {
      this.initFrame = frame.slice();
      return;
    }
    this.segFrames.set(idx, frame.slice());
    while (this.segFrames.size > this.maxSegments) {
      const oldest = Math.min(...this.segFrames.keys());
      this.segFrames.delete(oldest);
    }
  }
  /**
   * Send one frame to a socket, mirroring the Rust `direct_wire_send_frame`
   * format: stream bytes are [u32be frame_len][frame], split into
   * DIRECT_WIRE_TAG_STREAM-tagged chunks of at most DIRECT_WIRE_CHUNK_BYTES.
   */
  private sendFrame(ws: WebSocket, frame: Uint8Array) {
    const out = new Uint8Array(4 + frame.length);
    const len = frame.length >>> 0;
    out[0] = (len >>> 24) & 0xff;
    out[1] = (len >>> 16) & 0xff;
    out[2] = (len >>> 8) & 0xff;
    out[3] = len & 0xff;
    out.set(frame, 4);
    for (let i = 0; i < out.length; i += DIRECT_WIRE_CHUNK_BYTES) {
      const chunk = out.subarray(i, Math.min(out.length, i + DIRECT_WIRE_CHUNK_BYTES));
      const msg = new Uint8Array(1 + chunk.length);
      msg[0] = DIRECT_WIRE_TAG_STREAM;
      msg.set(chunk, 1);
      ws.send(msg);
    }
  }
}
/** Concatenate two byte arrays into a freshly allocated copy. */
function concatU8(a: Uint8Array, b: Uint8Array): Uint8Array {
  // Fast paths still return copies so callers never alias the inputs.
  if (a.length === 0) return b.slice();
  if (b.length === 0) return a.slice();
  const joined = new Uint8Array(a.length + b.length);
  joined.set(a);
  joined.set(b, a.length);
  return joined;
}
/**
 * Best-effort parse of an object frame's JSON meta header
 * (`[u32be metaLen][meta JSON][payload...]`). Returns null for anything
 * malformed: too short, implausible length, or unparseable JSON.
 */
function tryDecodeObjectMeta(frame: Uint8Array): ObjectMeta | null {
  if (frame.length < 4) return null;
  const view = new DataView(frame.buffer, frame.byteOffset, frame.byteLength);
  const metaLen = view.getUint32(0);
  if (metaLen < 2 || frame.length < 4 + metaLen) return null;
  try {
    const metaBytes = frame.subarray(4, 4 + metaLen);
    return JSON.parse(new TextDecoder().decode(metaBytes)) as ObjectMeta;
  } catch {
    return null;
  }
}

View file

@ -0,0 +1,37 @@
# Wrangler configuration for the every.channel Worker (static site + bootstrap API).
name = "every-channel-site"
main = "src/index.ts"
compatibility_date = "2026-02-08"
workers_dev = false
account_id = "9a54fd76c3d5d9abac437382a9027e9b"
# Bind this worker to the every.channel zone.
# This uses Workers Custom Domains (not "routes to an origin"), so it can serve the site without
# an application server behind it.
routes = [
  { pattern = "every.channel", custom_domain = true },
  { pattern = "www.every.channel", custom_domain = true },
]
# Static assets built by Trunk (apps/tauri/ui -> apps/tauri/dist)
[assets]
directory = "../../apps/tauri/dist"
# Durable Object bindings: bootstrap API (EC_API) and per-stream relay (EC_STREAM).
[[durable_objects.bindings]]
name = "EC_API"
class_name = "EcApiContainer"
[[durable_objects.bindings]]
name = "EC_STREAM"
class_name = "StreamRelayDO"
# Migrations are append-only; earlier tags must stay for already-created namespaces.
[[migrations]]
tag = "v2"
new_sqlite_classes = ["DirectoryDO"] # historical; safe to keep (namespace already created)
[[migrations]]
tag = "v3"
new_sqlite_classes = ["EcApiContainer"]
[[migrations]]
tag = "v4"
new_sqlite_classes = ["StreamRelayDO"]