every.channel: sanitized baseline

This commit is contained in:
every.channel 2026-02-15 16:17:27 -05:00
commit 897e556bea
No known key found for this signature in database
258 changed files with 74298 additions and 0 deletions

View file

@ -0,0 +1,7 @@
[env]
SDKROOT = "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk"
LLVM_CONFIG_PATH = "/opt/homebrew/opt/llvm/bin/llvm-config"
BINDGEN_EXTRA_CLANG_ARGS = "-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include"
[target.aarch64-apple-darwin]
rustflags = []

3
third_party/iroh-live/.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
/target
SANDBOX
target

9665
third_party/iroh-live/Cargo.lock generated vendored Normal file

File diff suppressed because it is too large Load diff

11
third_party/iroh-live/Cargo.toml vendored Normal file
View file

@ -0,0 +1,11 @@
[workspace]
members = [
"iroh-live",
"iroh-moq",
"moq-media",
"web-transport-iroh",
]
resolver = "2"
[profile.release]
debug = true

201
third_party/iroh-live/LICENSE-APACHE vendored Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [2025] [N0, INC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

201
third_party/iroh-live/LICENSE-MIT vendored Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [2025] [N0, INC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

28
third_party/iroh-live/Makefile.toml vendored Normal file
View file

@ -0,0 +1,28 @@
# Use cargo-make to run tasks here: https://crates.io/crates/cargo-make
[tasks.format]
workspace = false
command = "cargo"
args = [
"fmt",
"--all",
"--",
"--config",
"unstable_features=true",
"--config",
"imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true,format_code_in_doc_comments=true",
]
[tasks.format-check]
workspace = false
command = "cargo"
args = [
"fmt",
"--all",
"--check",
"--",
"--config",
"unstable_features=true",
"--config",
"imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true,format_code_in_doc_comments=true",
]

72
third_party/iroh-live/README.md vendored Normal file
View file

@ -0,0 +1,72 @@
# iroh-live
Livestreaming video and audio over iroh
**Status: experimental / work in progress**
This repository implements a live streaming protocol over iroh with [Media over Quic](https://moq.dev/).
It uses [moq-rs](https://github.com/kixelated/moq) to transfer audio and video streams over iroh connections.
## Structure of the repository
* [**`web-transport-iroh`**](web-transport-iroh): Implements the [web-transport](https://github.com/kixelated/web-transport) traits for iroh connections
* [**`iroh-moq`**](iroh-moq): Adapters to create and accept [moq-lite](https://github.com/kixelated/moq/tree/main/rs/moq) sessions over iroh
* [**`iroh-live`**](iroh-live): Native capture, encoding and decoding of audio and video. This is an early preview of a high-level live streaming toolkit for iroh. Currently, it has these features, all subject to change:
* Support for [hang](https://github.com/kixelated/moq/blob/main/rs/hang/) catalogs in MoQ sessions
* Capture and playout audio (with [firewheel](https://github.com/BillyDM/Firewheel/))
* Capture camera (with [nokhwa](https://github.com/l1npengtul/nokhwa/))
* Capture screens (with [xcap](https://github.com/nashaofu/xcap/))
* Encode and decode video (h264) and audio (Opus) using [ffmpeg](https://docs.rs/ffmpeg-next/latest/ffmpeg_next/). Video encoding is hardware accelerated on supported platforms.
* Support multiple renditions and on-demand switching of the encoding
There's still bugs and a lot of missing optimizations. This is an early, work-in-progress preview!
## Building
By default `ffmpeg` is dynamically linked. Enable the `static` feature to build ffmpeg from source and statically link it.
#### Build dependencies
*incomplete list, please file issues or PRs to expand this*
##### Linux
* For building with `static` feature: `apt install nasm pkg-config`
## Demo and examples
Check out the [`rooms`](iroh-live/examples/rooms.rs) example:
```
cargo run --release --example rooms
```
This will print a *room ticket*. Copy this to another device, and run:
```
cargo run --release --example rooms -- <TICKET>
```
Now you're chatting! With video and audio! Over iroh!
Use the `room-publish-file` example to publish a video from a file into a room.
There's also a [`publish`](iroh-live/examples/publish.rs) example (publish only, no GUI), and a [`watch`](iroh-live/examples/watch.rs) example (watch a stream from the publish example).
The examples use [`egui`](https://github.com/emilk/egui), however `iroh-live` is not coupled to any GUI framework and should work with anything that can render raw images to the screen.
## License
Copyright 2025 N0, INC.
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
http://opensource.org/licenses/MIT)
at your option.
## Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this project by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.

View file

@ -0,0 +1,94 @@
[package]
name = "iroh-live"
version = "0.1.0"
edition = "2024"
description = "audio and video live streaming over iroh"
authors = ["Franz Heinzmann <frando@n0.computer>"]
repository = "https://github.com/n0-computer/iroh-live"
license = "MIT OR Apache-2.0"
[dependencies]
anyhow = "1.0.100"
bytemuck = "1.24.0"
byte-unit = { version = "5.1", features = ["bit"] }
data-encoding = "2.9.0"
derive_more = { version = "2.0.1", features = ["display", "debug", "eq"] }
ffmpeg-next = { version = "8.0.0", default-features = false, features = ["device", "format", "filter", "software-resampling", "software-scaling"] }
ffmpeg-sys-next = { version = "8.0.1", optional = true }
firewheel = { version = "0.9.1", features = ["cpal", "peak_meter_node", "std", "stream_nodes", "cpal_resample_inputs"] }
hang = "0.9.0"
image = { version = "0.25.8", default-features = false }
iroh = "0.95.1"
iroh-gossip = "0.95.0"
iroh-moq = { path = "../iroh-moq" }
iroh-tickets = "0.2.0"
moq-lite = "0.10.1"
moq-media = { version = "0.1.0", path = "../moq-media" }
n0-error = { version = "0.1.2", features = ["anyhow"] }
n0-future = "0.3.1"
n0-watcher = "0.6.0"
nokhwa = { version = "0.10", features = [
"input-native",
"input-v4l",
"output-threaded",
] }
postcard = "1.1.3"
rand = "0.9.2"
serde = { version = "1.0.228", features = ["derive"] }
strum = { version = "0.27", features = ["derive"] }
tokio = { version = "1.48.0", features = ["sync"] }
tokio-util = "0.7.17"
tracing = "0.1.41"
xcap = "0.8"
webrtc-audio-processing = { version = "0.5.0", features = ["bundled"] }
bytes = "1.11.0"
iroh-smol-kv = { git = "https://github.com/Frando/iroh-smol-kv", branch = "iroh-095", version = "0.3.1", default-features = false }
buf-list = "1.1.2"
[dev-dependencies]
clap = { version = "4.5", features = ["derive"] }
eframe = "0.33.0"
postcard = "1.1.3"
tokio = { version = "1.48.0", features = ["full"] }
tracing-subscriber = "0.3.20"
[features]
default = []
# Enable static build of ffmpeg
static = [
"ffmpeg-next/static",
"ffmpeg-next/build-lib-openssl",
"ffmpeg-next/build-license-version3",
"ffmpeg-next/build-lib-opus",
"ffmpeg-next/build-lib-x264",
"ffmpeg-next/build-license-gpl",
"dep:ffmpeg-sys-next",
]
[target.'cfg(target_os = "macos")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
"build-videotoolbox",
"build-audiotoolbox",
] }
[target.'cfg(target_os = "linux")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
"build-vaapi",
# "build-vulkan",
# "build-lib-libmfx",
] }
[target.'cfg(target_os = "windows")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
"build-lib-d3d11va",
"build-lib-dxva2",
# "build-nvidia",
# "build-amf",
] }
[target.'cfg(target_os = "android")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
# "build-mediacodec",
] }

View file

@ -0,0 +1,3 @@
# iroh-live
See [../README.md](../README.md)

View file

@ -0,0 +1,143 @@
use std::{
path::{Path, PathBuf},
process::Stdio,
};
use bytes::BytesMut;
use clap::ValueEnum;
use moq_lite::BroadcastProducer;
use n0_error::Result;
use tokio::{
io::{AsyncRead, AsyncReadExt},
process::Command,
};
use tracing::info;
/// Input container format accepted by the importer.
#[derive(ValueEnum, Debug, Clone, Default, Copy)]
pub enum ImportType {
    /// Fragmented MP4 (CMAF); the default.
    #[default]
    Cmaf,
    /// Raw H.264 Annex-B byte stream.
    AnnexB,
}

impl ImportType {
    /// Stable string identifier for this format, as understood by
    /// `hang::import::Decoder::new`.
    pub fn as_str(&self) -> &'static str {
        match *self {
            Self::Cmaf => "cmaf",
            Self::AnnexB => "annex-b",
        }
    }
}
// Taken from
// https://github.com/moq-dev/moq/blob/30c28b8c3b6bd941fe1279c0fd8855139a1d4f6a/rs/hang-cli/src/import.rs
// License: Apache-2.0
/// Feeds raw container bytes into a `hang` import decoder, publishing the
/// resulting tracks to the broadcast handed to [`Import::new`].
pub struct Import {
    decoder: hang::import::Decoder,
    buffer: BytesMut,
}

impl Import {
    /// Creates an importer that publishes to `broadcast`, parsing input as `format`.
    ///
    /// # Panics
    /// Panics if the decoder does not support `format`.
    pub fn new(broadcast: BroadcastProducer, format: ImportType) -> Self {
        Self {
            decoder: hang::import::Decoder::new(broadcast.into(), format.as_str())
                .expect("supported format"),
            buffer: BytesMut::new(),
        }
    }

    /// Reads from `input` until the decoder reports itself initialized or
    /// the input reaches EOF, feeding each chunk through the decoder.
    pub async fn init_from<T: AsyncRead + Unpin>(&mut self, input: &mut T) -> anyhow::Result<()> {
        loop {
            // Same short-circuit order as a `while !initialized && read > 0`:
            // check the decoder first, only then pull more bytes.
            if self.decoder.is_initialized() {
                return Ok(());
            }
            if input.read_buf(&mut self.buffer).await? == 0 {
                return Ok(());
            }
            self.decoder.decode_stream(&mut self.buffer)?;
        }
    }

    /// Streams the remainder of `input` through the decoder until EOF.
    pub async fn read_from<T: AsyncRead + Unpin>(&mut self, input: &mut T) -> anyhow::Result<()> {
        loop {
            let read = input.read_buf(&mut self.buffer).await?;
            if read == 0 {
                break;
            }
            self.decoder.decode_stream(&mut self.buffer)?;
        }
        // Flush the final frame.
        self.decoder.decode_frame(&mut self.buffer, None)
    }
}
/// Spawns `ffmpeg` to (re)mux `input` into the container selected by
/// `format`, returning the child's stdout as an async byte stream.
///
/// The input file is looped forever (`-stream_loop -1`) and read at its
/// native rate (`-re`), so the returned stream does not terminate on its
/// own. Video is stream-copied when the source is already H.264 (per
/// [`is_h264`]), otherwise transcoded with `libx264`.
///
/// NOTE(review): the `Child` handle is dropped at the end of this function,
/// so the ffmpeg process is detached and never explicitly killed or reaped
/// here — presumably it exits on broken pipe when the returned reader is
/// dropped; confirm this is the intended lifecycle.
pub async fn transcode(input: PathBuf, format: ImportType) -> Result<impl AsyncRead> {
    let copy_video = is_h264(&input).await?;
    let mut cmd = Command::new("ffmpeg");
    cmd.args([
        "-hide_banner",
        "-loglevel",
        "error",
        "-stream_loop",
        "-1",
        "-re",
        "-i",
    ]);
    cmd.arg(input.as_os_str());
    if copy_video {
        info!("input is h264, copy video");
        cmd.args(["-c:v", "copy"]);
    } else {
        info!("input is not h264, transcode");
        cmd.args(["-c:v", "libx264", "-pix_fmt", "yuv420p"]);
    }
    match format {
        ImportType::Cmaf => {
            // Opus audio inside a low-latency fragmented MP4 (CMAF).
            cmd.args(["-c:a", "libopus", "-b:a", "128k"]);
            cmd.args([
                "-movflags",
                "cmaf+separate_moof+delay_moov+skip_trailer+frag_every_frame",
                "-f",
                "mp4",
            ]);
        }
        ImportType::AnnexB => {
            // Raw H.264 Annex-B elementary stream with audio disabled.
            // Fixes two bugs: `"-a", "n"` is not a valid ffmpeg option
            // (the audio-disable flag is the single token `-an`), and the
            // previous trailing `-movflags …, -f, mp4` overrode `-f h264`
            // (ffmpeg honors the last `-f`), silently producing MP4 output
            // instead of the requested Annex-B stream.
            cmd.args(["-an", "-bsf:v", "h264_mp4toannexb", "-f", "h264"]);
        }
    }
    // Write the output to stdout.
    cmd.arg("-");
    let mut child = cmd.stdout(Stdio::piped()).spawn()?;
    let stdout = child.stdout.take().unwrap();
    Ok(stdout)
}
/// Probes `input` with `ffprobe` and reports whether its first video
/// stream (`v:0`) is encoded as H.264.
pub async fn is_h264(input: &Path) -> Result<bool> {
    let probe = Command::new("ffprobe")
        .arg("-v")
        .arg("error")
        .arg("-select_streams")
        .arg("v:0")
        .arg("-show_entries")
        .arg("stream=codec_name")
        .arg("-of")
        .arg("default=nokey=1:noprint_wrappers=1")
        .arg(input.as_os_str())
        .output()
        .await?;
    // ffprobe prints just the codec name (e.g. "h264") with no wrapper.
    let codec = String::from_utf8_lossy(&probe.stdout);
    Ok(codec.trim() == "h264")
}

View file

@ -0,0 +1 @@
/// Importing media files (CMAF / Annex-B) into a broadcast, using an
/// external `ffmpeg`/`ffprobe` process for (re)muxing and probing.
pub mod import;

View file

@ -0,0 +1,95 @@
use clap::Parser;
use iroh::{Endpoint, SecretKey, protocol::Router};
use iroh_live::{
Live,
media::{
audio::AudioBackend,
av::{AudioPreset, VideoCodec, VideoPreset},
capture::CameraCapturer,
ffmpeg::{H264Encoder, OpusEncoder},
publish::{AudioRenditions, PublishBroadcast, VideoRenditions},
},
ticket::LiveTicket,
};
use n0_error::StdResultExt;
/// Example: capture camera and microphone, encode them, and publish the
/// result as a live broadcast over iroh, printing tickets for watchers.
#[tokio::main]
async fn main() -> n0_error::Result {
    tracing_subscriber::fmt::init();
    let cli = Cli::parse();
    // Setup audio backend.
    let audio_ctx = AudioBackend::new();
    // Setup iroh and iroh-live.
    let endpoint = Endpoint::builder()
        .secret_key(secret_key_from_env()?)
        .bind()
        .await?;
    let live = Live::new(endpoint.clone());
    // Route incoming connections on the iroh-live ALPN to the MoQ handler.
    let router = Router::builder(endpoint)
        .accept(iroh_live::ALPN, live.moq.protocol_handler())
        .spawn();
    // Create a publish broadcast.
    let mut broadcast = PublishBroadcast::new();
    // Capture audio, and encode with the cli-provided preset.
    if !cli.no_audio {
        let mic = audio_ctx.default_input().await?;
        let audio = AudioRenditions::new::<OpusEncoder>(mic, [cli.audio_preset]);
        broadcast.set_audio(Some(audio))?;
    }
    // Capture camera, and encode with the cli-provided presets.
    // NOTE(review): `cli.codec` is parsed but never used — video is always
    // encoded via `H264Encoder`; confirm whether the flag should select
    // the encoder.
    if !cli.no_video {
        let camera = CameraCapturer::new()?;
        let video = VideoRenditions::new::<H264Encoder>(camera, cli.video_presets);
        broadcast.set_video(Some(video))?;
    }
    // Publish under the name "hello".
    let name = "hello";
    live.publish(name, broadcast.producer()).await?;
    // Create a ticket string and print
    let ticket = LiveTicket::new(router.endpoint().id(), name);
    println!("publishing at {ticket}");
    // Second ticket built from the full endpoint address rather than just
    // the id — presumably includes direct addresses; TODO confirm.
    let long_ticket = LiveTicket::new(router.endpoint().addr(), name);
    println!("\nticket with addrs: {long_ticket}");
    // Wait for ctrl-c and then shutdown.
    tokio::signal::ctrl_c().await?;
    live.shutdown();
    router.shutdown().await.std_context("router shutdown")?;
    Ok(())
}
// Command-line options for the publish example.
// Plain `//` comments are used here on purpose: `///` doc comments on a clap
// derive struct would become the generated --help text.
#[derive(Parser, Debug)]
struct Cli {
    // NOTE(review): this flag is parsed but not currently read by `main` —
    // confirm whether it should select the video encoder.
    #[arg(long, default_value_t=VideoCodec::H264)]
    codec: VideoCodec,
    // Comma-separated list of video renditions to encode.
    #[arg(long, value_delimiter=',', default_values_t=[VideoPreset::P180, VideoPreset::P360, VideoPreset::P720, VideoPreset::P1080])]
    video_presets: Vec<VideoPreset>,
    // Audio encoder preset.
    #[arg(long, default_value_t=AudioPreset::Hq)]
    audio_preset: AudioPreset,
    // Disable video capture and publishing.
    #[arg(long)]
    no_video: bool,
    // Disable audio capture and publishing.
    #[arg(long)]
    no_audio: bool,
}
/// Load the node's secret key from the `IROH_SECRET` env var, or generate a
/// fresh one and print it so the identity can be kept stable across runs.
fn secret_key_from_env() -> n0_error::Result<SecretKey> {
    if let Ok(hex) = std::env::var("IROH_SECRET") {
        return Ok(hex.parse()?);
    }
    let key = SecretKey::generate(&mut rand::rng());
    println!(
        "Created new secret. Reuse with IROH_SECRET={}",
        data_encoding::HEXLOWER.encode(&key.to_bytes())
    );
    Ok(key)
}

View file

@ -0,0 +1,71 @@
use std::{path::PathBuf, pin::Pin};
use clap::Parser;
use iroh::EndpointId;
use iroh_live::LiveNode;
use moq_lite::BroadcastProducer;
use n0_error::Result;
use tokio::io::AsyncRead;
use tracing::warn;
mod common;
use self::common::import::{Import, ImportType, transcode};
// Command-line options for the publish-file example.
// New comments use `//` on purpose: adding `///` doc comments to clap derive
// fields would change the generated --help output.
#[derive(Debug, Parser)]
struct Cli {
    // Endpoint id of the remote to publish to.
    #[clap(short, long)]
    target: EndpointId,
    // Broadcast path to publish under on the remote.
    #[clap(short, long, default_value = "anon/bbb")]
    path: String,
    /// The format of the input media.
    #[clap(long, value_enum, default_value_t = ImportType::Cmaf)]
    format: ImportType,
    /// Input file.
    // If unset, media is read from stdin.
    #[clap(short, long)]
    file: Option<PathBuf>,
    /// Transcode the video with ffmpeg.
    #[clap(long)]
    transcode: bool,
}
/// Stream a media file (or stdin) to a remote endpoint as a MoQ broadcast.
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    let cli = Cli::parse();
    let node = LiveNode::spawn_from_env().await?;
    let session = node.live.connect(cli.target).await?;
    // Pick the input source: an optionally-transcoded file, or stdin.
    let mut input: Pin<Box<dyn AsyncRead + Send + 'static>> = match (cli.file, cli.transcode) {
        (Some(path), true) => Box::pin(transcode(path.clone(), cli.format).await?),
        (Some(path), false) => Box::pin(tokio::fs::File::open(path).await?),
        (None, false) => Box::pin(tokio::io::stdin()),
        (None, true) => panic!("transcoding stdin is not supported"),
    };
    let broadcast = BroadcastProducer::default();
    session.publish(cli.path, broadcast.consume());
    // Feed the input into the broadcast until EOF or error.
    let import = async move {
        let mut import = Import::new(broadcast.into(), cli.format);
        import.init_from(&mut input).await?;
        import.read_from(&mut input).await?;
        n0_error::Ok(())
    };
    tokio::pin!(import);
    // Run the import until it finishes or ctrl-c is pressed.
    tokio::select! {
        res = &mut import => {
            if let Err(err) = res {
                warn!("Import failed: {err:#}");
            }
        }
        _ = tokio::signal::ctrl_c() => {}
    };
    drop(import);
    node.shutdown().await?;
    Ok(())
}

View file

@ -0,0 +1,75 @@
use std::{path::PathBuf, pin::Pin};
use clap::Parser;
use iroh_live::{LiveNode, rooms::RoomTicket};
use moq_lite::BroadcastProducer;
use n0_error::Result;
use tokio::io::AsyncRead;
use tracing::warn;
mod common;
use self::common::import::{Import, ImportType, transcode};
// Command-line options for the publish-to-room example.
// New comments use `//` on purpose: `///` doc comments on clap derive fields
// would change the generated --help output.
#[derive(Debug, Parser)]
struct Cli {
    /// Room to join. If empty a new room will be created.
    /// Will also be read from the IROH_LIVE_ROOM environment variable.
    #[clap(short, long)]
    room: Option<RoomTicket>,
    /// The format of the input media.
    #[clap(long, value_enum, default_value_t = ImportType::Cmaf)]
    format: ImportType,
    /// Input file. If empty reads from stdin.
    file: Option<PathBuf>,
    /// Transcode the video with ffmpeg.
    #[clap(long)]
    transcode: bool,
}
/// Stream a media file (or stdin) into a gossip-coordinated room under the
/// broadcast name "file".
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    let cli = Cli::parse();
    // Use the provided ticket, or fall back to the environment / a new room.
    let ticket = match cli.room {
        Some(ticket) => ticket,
        None => RoomTicket::new_from_env()?,
    };
    let node = LiveNode::spawn_from_env().await?;
    let room = node.join_room(ticket).await?;
    // Pick the input source: an optionally-transcoded file, or stdin.
    let mut input: Pin<Box<dyn AsyncRead + Send + 'static>> = match (cli.file, cli.transcode) {
        (Some(path), true) => Box::pin(transcode(path.clone(), cli.format).await?),
        (Some(path), false) => Box::pin(tokio::fs::File::open(path).await?),
        (None, false) => Box::pin(tokio::io::stdin()),
        (None, true) => panic!("transcoding stdin is not supported"),
    };
    let broadcast = BroadcastProducer::default();
    room.publish("file", broadcast.clone()).await?;
    // Feed the input into the broadcast until EOF or error.
    let import = async move {
        let mut import = Import::new(broadcast.into(), cli.format);
        import.init_from(&mut input).await?;
        import.read_from(&mut input).await?;
        n0_error::Ok(())
    };
    tokio::pin!(import);
    // Run the import until it finishes or ctrl-c is pressed.
    tokio::select! {
        res = &mut import => {
            if let Err(err) = res {
                warn!("Import failed: {err:#}");
            }
        }
        _ = tokio::signal::ctrl_c() => {}
    };
    drop(import);
    drop(room);
    node.shutdown().await?;
    Ok(())
}

View file

@ -0,0 +1,428 @@
use std::time::Duration;
use clap::Parser;
use eframe::egui::{self, Color32, Id, Vec2};
use iroh::{Endpoint, protocol::Router};
use iroh_gossip::{Gossip, TopicId};
use iroh_live::{
Live,
media::{
audio::AudioBackend,
av::{AudioPreset, VideoPreset},
capture::{CameraCapturer, ScreenCapturer},
ffmpeg::{FfmpegDecoders, FfmpegVideoDecoder, H264Encoder, OpusEncoder, ffmpeg_log_init},
publish::{AudioRenditions, PublishBroadcast, VideoRenditions},
subscribe::{AudioTrack, AvRemoteTrack, SubscribeBroadcast, WatchTrack},
},
moq::MoqSession,
rooms::{Room, RoomEvent, RoomTicket},
util::StatsSmoother,
};
use n0_error::{Result, StdResultExt, anyerr};
use tracing::{info, warn};
const BROADCAST_NAME: &str = "cam";
// Command-line options for the room GUI example.
// `//` comments are used on purpose: `///` doc comments on clap derive
// fields would change the generated --help output.
#[derive(Debug, Parser)]
struct Cli {
    // Room ticket to join; a new room is created when omitted.
    join: Option<RoomTicket>,
    // Capture the screen instead of the camera.
    #[clap(long)]
    screen: bool,
    // Do not capture or publish audio.
    #[clap(long)]
    no_audio: bool,
}
/// Entry point: set up networking on a dedicated tokio runtime, then hand
/// control to the eframe/egui event loop.
fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    ffmpeg_log_init();
    let cli = Cli::parse();
    // eframe owns the main thread, so async work runs on this runtime.
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap();
    let audio_ctx = AudioBackend::new();
    let (router, broadcast, room) = rt.block_on(setup(cli, audio_ctx.clone()))?;
    // Keep a runtime context entered so spawns from the GUI thread work.
    let _guard = rt.enter();
    eframe::run_native(
        "IrohLive",
        eframe::NativeOptions::default(),
        Box::new(|cc| {
            let app = App {
                rt,
                room,
                peers: vec![],
                // Local self-view preview, if a local track is available.
                self_video: broadcast
                    .watch_local(Default::default())
                    .map(|track| VideoView::new(&cc.egui_ctx, track, usize::MAX)),
                router,
                _broadcast: broadcast,
                audio_ctx,
            };
            Ok(Box::new(app))
        }),
    )
    .map_err(|err| anyerr!("eframe failed: {err:#}"))
}
/// Bind the iroh endpoint, start gossip and MoQ, build the local publish
/// broadcast (mic + camera/screen), and join/create the room.
async fn setup(cli: Cli, audio_ctx: AudioBackend) -> Result<(Router, PublishBroadcast, Room)> {
    let endpoint = Endpoint::builder()
        .secret_key(secret_key_from_env()?)
        .bind()
        .await?;
    info!(endpoint_id=%endpoint.id(), "endpoint bound");
    let gossip = Gossip::builder().spawn(endpoint.clone());
    let live = Live::new(endpoint.clone());
    let router = Router::builder(endpoint)
        .accept(iroh_gossip::ALPN, gossip.clone())
        .accept(iroh_moq::ALPN, live.protocol_handler())
        .spawn();
    // Publish ourselves.
    let broadcast = {
        let mut broadcast = PublishBroadcast::new();
        if !cli.no_audio {
            let mic = audio_ctx.default_input().await?;
            let audio = AudioRenditions::new::<OpusEncoder>(mic, [AudioPreset::Hq]);
            broadcast.set_audio(Some(audio))?;
        }
        // Video source is either the screen or the default camera.
        let video = if cli.screen {
            let screen = ScreenCapturer::new()?;
            VideoRenditions::new::<H264Encoder>(screen, VideoPreset::all())
        } else {
            let camera = CameraCapturer::new()?;
            VideoRenditions::new::<H264Encoder>(camera, VideoPreset::all())
        };
        broadcast.set_video(Some(video))?;
        broadcast
    };
    // Join the given room, or create a new one with a fresh/env topic.
    let ticket = match cli.join {
        None => RoomTicket::new(topic_id_from_env()?, vec![]),
        Some(ticket) => ticket,
    };
    let room = Room::new(router.endpoint(), gossip, live, ticket).await?;
    room.publish(BROADCAST_NAME, broadcast.producer()).await?;
    println!("room ticket: {}", room.ticket());
    Ok((router, broadcast, room))
}
/// GUI application state for the room example.
struct App {
    // Room membership; polled for events every frame.
    room: Room,
    // One view per subscribed remote broadcast.
    peers: Vec<RemoteTrackView>,
    // Local camera/screen preview, if available.
    self_video: Option<VideoView>,
    // Kept for shutdown on exit.
    router: Router,
    // Kept alive so the local broadcast keeps running.
    _broadcast: PublishBroadcast,
    // Used to open audio outputs for new remote tracks.
    audio_ctx: AudioBackend,
    // Runtime for blocking on async work from the GUI thread.
    rt: tokio::runtime::Runtime,
}
impl eframe::App for App {
    /// Per-frame update: drain room events, then render the peer grid and
    /// the local self-view overlay.
    fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) {
        ctx.request_repaint_after(Duration::from_millis(30)); // min 30 fps
        // Remove closed peers.
        self.peers.retain(|track| !track.is_closed());
        // Add newly subscribed peers.
        while let Ok(event) = self.room.try_recv() {
            match event {
                RoomEvent::RemoteAnnounced { remote, broadcasts } => {
                    info!(
                        "peer announced: {} with broadcasts {broadcasts:?}",
                        remote.fmt_short(),
                    );
                }
                RoomEvent::RemoteConnected { session } => {
                    info!("peer connected: {}", session.conn().remote_id().fmt_short());
                }
                RoomEvent::BroadcastSubscribed { session, broadcast } => {
                    info!(
                        "subscribing to {}:{}",
                        session.remote_id(),
                        broadcast.broadcast_name()
                    );
                    // Start decoding and audio playback for the new peer.
                    // block_on is acceptable here: the GUI thread briefly
                    // waits for the audio output to open.
                    let track = match self.rt.block_on(async {
                        let audio_out = self.audio_ctx.default_output().await?;
                        broadcast.watch_and_listen::<FfmpegDecoders>(audio_out, Default::default())
                    }) {
                        Ok(track) => track,
                        Err(err) => {
                            warn!("failed to add track: {err}");
                            continue;
                        }
                    };
                    self.peers
                        .push(RemoteTrackView::new(ctx, session, track, self.peers.len()));
                }
            }
        }
        egui::CentralPanel::default()
            .frame(egui::Frame::new().inner_margin(0.0).outer_margin(0.0))
            .show(ctx, |ui| {
                ui.spacing_mut().item_spacing = egui::vec2(0.0, 0.0);
                show_video_grid(ctx, ui, &mut self.peers);
                // Render video preview of self
                if let Some(self_view) = self.self_video.as_mut() {
                    let size = (200., 200.);
                    egui::Area::new(Id::new("self-video"))
                        .anchor(egui::Align2::RIGHT_BOTTOM, [-10.0, -10.0]) // 10px from the bottom-right edge
                        .order(egui::Order::Foreground)
                        .show(ui.ctx(), |ui| {
                            egui::Frame::new()
                                .fill(egui::Color32::from_rgba_unmultiplied(0, 0, 0, 128))
                                .corner_radius(8.0)
                                .show(ui, |ui| {
                                    ui.set_width(size.0);
                                    ui.set_height(size.1);
                                    ui.add_sized(size, self_view.render_image(ctx, size.into()));
                                });
                        });
                }
            });
    }
    /// Shut down the iroh router when the window closes.
    fn on_exit(&mut self, _gl: Option<&eframe::glow::Context>) {
        let router = self.router.clone();
        self.rt.block_on(async move {
            if let Err(err) = router.shutdown().await {
                warn!("shutdown error: {err:?}");
            }
        });
    }
}
/// One remote peer's broadcast: video view, audio playback handle, and the
/// session/stats needed for the overlay UI.
struct RemoteTrackView {
    // Stable index used for egui ids and texture names.
    id: usize,
    video: Option<VideoView>,
    // Held to keep audio playback alive; dropped when the view goes away.
    _audio_track: Option<AudioTrack>,
    session: MoqSession,
    broadcast: SubscribeBroadcast,
    stats: StatsSmoother,
}
impl RemoteTrackView {
    fn new(ctx: &egui::Context, session: MoqSession, track: AvRemoteTrack, id: usize) -> Self {
        Self {
            video: track.video.map(|video| VideoView::new(ctx, video, id)),
            stats: StatsSmoother::new(),
            broadcast: track.broadcast,
            id,
            _audio_track: track.audio,
            session,
        }
    }
    /// True once the underlying connection has been closed.
    fn is_closed(&self) -> bool {
        self.session.conn().close_reason().is_some()
    }
    /// Current video frame as an egui image, or `None` without a video track.
    fn render_image(
        &mut self,
        ctx: &egui::Context,
        available_size: Vec2,
    ) -> Option<egui::Image<'_>> {
        self.video
            .as_mut()
            .map(|video| video.render_image(ctx, available_size))
    }
    /// Draw the stats/rendition overlay anchored to the tile's bottom-left.
    fn render_overlay_in_rect(&mut self, ui: &mut egui::Ui, rect: egui::Rect) {
        let pos = rect.left_bottom() + egui::vec2(8.0, -8.0);
        let overlay_id = egui::Id::new(("overlay", self.id));
        egui::Area::new(overlay_id)
            .order(egui::Order::Foreground)
            .fixed_pos(pos)
            .show(ui.ctx(), |ui| {
                egui::Frame::new()
                    .fill(egui::Color32::from_rgba_unmultiplied(0, 0, 0, 128))
                    .corner_radius(3.0)
                    .show(ui, |ui| {
                        ui.spacing_mut().item_spacing = egui::vec2(8.0, 8.0);
                        ui.set_min_width(100.);
                        self.render_overlay(ui);
                    });
            });
    }
    /// Overlay contents: rendition picker plus connection statistics.
    fn render_overlay(&mut self, ui: &mut egui::Ui) {
        ui.vertical(|ui| {
            let selected = self.video.as_ref().map(|v| v.track.rendition().to_owned());
            egui::ComboBox::from_id_salt(format!("video{}", self.id))
                .selected_text(selected.clone().unwrap_or_default())
                .show_ui(ui, |ui| {
                    for name in self.broadcast.catalog().video_renditions() {
                        if ui
                            .selectable_label(selected.as_deref() == Some(name), name)
                            .clicked()
                        {
                            // Switch to the clicked rendition, reusing the
                            // existing view when there is one.
                            if let Ok(track) = self
                                .broadcast
                                .watch_rendition::<FfmpegVideoDecoder>(&Default::default(), name)
                            {
                                if let Some(video) = self.video.as_mut() {
                                    video.set_track(track);
                                } else {
                                    self.video = Some(VideoView::new(ui.ctx(), track, self.id))
                                }
                            }
                        }
                    }
                });
            let stats = self.stats.smoothed(|| self.session.conn().stats());
            ui.label(format!(
                "peer: {}",
                self.session.conn().remote_id().fmt_short()
            ));
            ui.label(format!("BW up: {}", stats.up.rate_str));
            ui.label(format!("BW down: {}", stats.down.rate_str));
            ui.label(format!("RTT: {}ms", stats.rtt.as_millis()));
        });
    }
}
/// Renders a decoded video track into an egui texture.
struct VideoView {
    track: WatchTrack,
    // Last rendered size, used to detect viewport changes.
    size: egui::Vec2,
    texture: egui::TextureHandle,
}
impl VideoView {
    fn new(ctx: &egui::Context, track: WatchTrack, id: usize) -> Self {
        let texture_name = format!("video-texture-{}", id);
        // Start with a small black placeholder until the first frame arrives.
        let size = egui::vec2(100., 100.);
        let color_image =
            egui::ColorImage::filled([size.x as usize, size.y as usize], Color32::BLACK);
        let texture = ctx.load_texture(&texture_name, color_image, egui::TextureOptions::default());
        Self {
            size,
            texture,
            track,
        }
    }
    /// Replace the underlying track (e.g. after a rendition switch).
    fn set_track(&mut self, track: WatchTrack) {
        self.track = track;
    }
    /// Upload the latest frame (if any) and return it as an egui image.
    fn render_image(&mut self, ctx: &egui::Context, available_size: Vec2) -> egui::Image<'_> {
        let available_size = available_size.into();
        // Propagate viewport changes so the decoder can pick a matching
        // resolution (scaled by the pixels-per-point factor).
        if available_size != self.size {
            self.size = available_size;
            let ppp = ctx.pixels_per_point();
            let w = (available_size.x * ppp) as u32;
            let h = (available_size.y * ppp) as u32;
            self.track.set_viewport(w, h);
        }
        if let Some(frame) = self.track.current_frame() {
            let (w, h) = frame.img().dimensions();
            let image = egui::ColorImage::from_rgba_unmultiplied(
                [w as usize, h as usize],
                frame.img().as_raw(),
            );
            self.texture.set(image, Default::default());
        }
        egui::Image::from_texture(&self.texture).shrink_to_fit()
    }
}
/// Show `textures` as squares in a compact auto grid that fills the parent as much as
/// possible without breaking square aspect.
fn show_video_grid(ctx: &egui::Context, ui: &mut egui::Ui, videos: &mut [RemoteTrackView]) {
let n = videos.len();
if n == 0 {
return;
}
// Parent size were allowed to use
let avail = ui.available_size(); // egui docs recommend this for filling containers
// Choose columns ≈ ceil(sqrt(n)), rows to fit the rest
let cols = (n as f32).sqrt().ceil() as usize;
let rows = (n + cols - 1) / cols;
// Side length of each square in points (fill the limiting axis)
let cell = (avail.x / cols as f32).min(avail.y / rows as f32).floor();
let cell_size = [cell, cell];
// Compute the grids actual pixel footprint
let grid_w = cell * cols as f32;
let grid_h = cell * rows as f32;
// Center the grid in any leftover space
let pad_x = ((avail.x - grid_w) * 0.5).max(0.0);
let pad_y = ((avail.y - grid_h) * 0.5).max(0.0);
ui.add_space(pad_y);
ui.horizontal(|ui| {
ui.add_space(pad_x);
egui::Grid::new("image_grid")
.spacing(Vec2::ZERO) // no gaps; tiles butt together
.show(ui, |ui| {
let mut i = 0;
for _r in 0..rows {
for _c in 0..cols {
if i < n {
// Force exact square size for each image
if let Some(image) = videos[i].render_image(ctx, cell_size.into()) {
let response = ui.add_sized(cell_size, image);
let rect = response.rect;
videos[i].render_overlay_in_rect(ui, rect);
}
i += 1;
} else {
// Keep the grid rectangular when N isnt a multiple of cols
ui.allocate_exact_size(Vec2::splat(cell), egui::Sense::hover());
}
}
ui.end_row();
}
});
});
}
/// Load the node's secret key from the `IROH_SECRET` env var, or generate a
/// fresh one and print it so the identity can be kept stable across runs.
fn secret_key_from_env() -> n0_error::Result<iroh::SecretKey> {
    if let Ok(hex) = std::env::var("IROH_SECRET") {
        return Ok(hex.parse()?);
    }
    let key = iroh::SecretKey::generate(&mut rand::rng());
    println!(
        "Created new secret. Reuse with IROH_SECRET={}",
        data_encoding::HEXLOWER.encode(&key.to_bytes())
    );
    Ok(key)
}
/// Load the gossip topic from the `IROH_TOPIC` env var (hex-encoded), or
/// create a random topic and print it so it can be reused.
fn topic_id_from_env() -> n0_error::Result<TopicId> {
    if let Ok(hex) = std::env::var("IROH_TOPIC") {
        let bytes = data_encoding::HEXLOWER
            .decode(hex.as_bytes())
            .std_context("invalid hex")?
            .as_slice()
            .try_into()
            .std_context("invalid length")?;
        return Ok(TopicId::from_bytes(bytes));
    }
    let topic = TopicId::from_bytes(rand::random());
    println!(
        "Created new topic. Reuse with IROH_TOPIC={}",
        data_encoding::HEXLOWER.encode(topic.as_bytes())
    );
    Ok(topic)
}

View file

@ -0,0 +1,225 @@
use std::time::Duration;
use clap::Parser;
use eframe::egui::{self, Color32, Id, Vec2};
use iroh::{Endpoint, EndpointId};
use iroh_live::{
Live,
media::{
audio::AudioBackend,
ffmpeg::{FfmpegDecoders, FfmpegVideoDecoder, ffmpeg_log_init},
subscribe::{AudioTrack, SubscribeBroadcast, WatchTrack},
},
moq::MoqSession,
ticket::LiveTicket,
util::StatsSmoother,
};
use n0_error::{Result, anyerr};
use tracing::info;
// Command-line options for the watch example: either a full ticket, or an
// endpoint id plus broadcast name.
//
// Fix: clap's `conflicts_with`/`requires` reference argument *ids*, which for
// derived args default to the snake_case field name (`endpoint_id`), not the
// kebab-case flag name. The previous `"endpoint-id"` referenced a
// non-existent id and panics under clap's debug assertions.
// (`//` comments on purpose: `///` would alter the generated --help text.)
#[derive(Debug, Parser)]
struct Cli {
    #[clap(long, conflicts_with = "endpoint_id")]
    ticket: Option<LiveTicket>,
    #[clap(long, conflicts_with = "ticket", requires = "name")]
    endpoint_id: Option<EndpointId>,
    #[clap(long, conflicts_with = "ticket", requires = "endpoint_id")]
    name: Option<String>,
}
/// Connect to a publisher and play back its broadcast in an egui window.
fn main() -> Result<()> {
    let cli = Cli::parse();
    // Resolve the connection target from either a ticket or id+name.
    let ticket = match (cli.ticket, cli.endpoint_id, cli.name) {
        (Some(ticket), None, None) => ticket,
        (None, Some(endpoint_id), Some(name)) => LiveTicket::new(endpoint_id, name),
        _ => {
            eprintln!("Invalid arguments: Use either --ticket, or --endpoint and --name");
            std::process::exit(1);
        }
    };
    tracing_subscriber::fmt::init();
    ffmpeg_log_init();
    // eframe owns the main thread, so async work runs on this runtime.
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap();
    let audio_ctx = AudioBackend::new();
    println!("connecting to {ticket} ...");
    // Connect and start decoding before the window opens.
    let (endpoint, session, track) = rt.block_on({
        let audio_ctx = audio_ctx.clone();
        async move {
            let endpoint = Endpoint::bind().await?;
            let live = Live::new(endpoint.clone());
            let audio_out = audio_ctx.default_output().await?;
            let (session, track) = live
                .watch_and_listen::<FfmpegDecoders>(
                    ticket.endpoint,
                    &ticket.broadcast_name,
                    audio_out,
                    Default::default(),
                )
                .await?;
            println!("connected!");
            n0_error::Ok((endpoint, session, track))
        }
    })?;
    let _guard = rt.enter();
    eframe::run_native(
        "IrohLive",
        eframe::NativeOptions::default(),
        Box::new(|cc| {
            // Close the window on ctrl-c from the terminal.
            let egui_ctx = cc.egui_ctx.clone();
            rt.spawn(async move {
                let _ = tokio::signal::ctrl_c().await;
                egui_ctx.send_viewport_cmd(egui::ViewportCommand::Close);
                // TODO: When the app is not visible, this will not trigger `update` immediately.
                // See https://github.com/emilk/egui/issues/5112
                egui_ctx.request_repaint();
            });
            let app = App {
                video: track.video.map(|video| VideoView::new(&cc.egui_ctx, video)),
                _audio_ctx: audio_ctx,
                _audio: track.audio,
                broadcast: track.broadcast,
                session: session,
                stats: StatsSmoother::new(),
                endpoint,
                rt,
            };
            Ok(Box::new(app))
        }),
    )
    .map_err(|err| anyerr!("eframe failed: {err:#}"))
}
/// GUI application state for the watch example.
struct App {
    video: Option<VideoView>,
    // Held to keep audio playback alive for the app's lifetime.
    _audio: Option<AudioTrack>,
    _audio_ctx: AudioBackend,
    // Kept for a clean close on exit.
    endpoint: Endpoint,
    session: MoqSession,
    broadcast: SubscribeBroadcast,
    stats: StatsSmoother,
    // Runtime for blocking on async work from the GUI thread.
    rt: tokio::runtime::Runtime,
}
impl eframe::App for App {
    /// Per-frame update: render the video full-window plus a stats overlay.
    fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) {
        ctx.request_repaint_after(Duration::from_millis(30)); // min 30 fps
        egui::CentralPanel::default()
            .frame(egui::Frame::new().inner_margin(0.0).outer_margin(0.0))
            .show(ctx, |ui| {
                ui.spacing_mut().item_spacing = egui::vec2(0.0, 0.0);
                let avail = ui.available_size();
                if let Some(video) = self.video.as_mut() {
                    ui.add_sized(avail, video.render(ctx, avail));
                }
                // Overlay with rendition picker and connection stats.
                egui::Area::new(Id::new("overlay"))
                    .anchor(egui::Align2::LEFT_BOTTOM, [8.0, -8.0])
                    .show(ctx, |ui| {
                        egui::Frame::new()
                            .fill(egui::Color32::from_rgba_unmultiplied(0, 0, 0, 128))
                            .corner_radius(3.0)
                            .show(ui, |ui| {
                                ui.spacing_mut().item_spacing = egui::vec2(8.0, 8.0);
                                ui.set_min_width(100.);
                                self.render_overlay(ctx, ui);
                            })
                    })
            });
    }
    /// Tear down the subscription, session, and endpoint when the window closes.
    fn on_exit(&mut self, _gl: Option<&eframe::glow::Context>) {
        info!("exit");
        self.broadcast.shutdown();
        self.session.close(0, b"bye");
        let endpoint = self.endpoint.clone();
        self.rt.block_on(async move {
            endpoint.close().await;
            info!("endpoint closed");
        });
    }
}
impl App {
    /// Overlay contents: rendition picker plus connection statistics.
    fn render_overlay(&mut self, ctx: &egui::Context, ui: &mut egui::Ui) {
        ui.vertical(|ui| {
            let selected = self
                .video
                .as_ref()
                .map(|video| video.track.rendition().to_owned());
            egui::ComboBox::from_label("")
                .selected_text(selected.clone().unwrap_or_default())
                .show_ui(ui, |ui| {
                    for name in self.broadcast.catalog().video_renditions() {
                        if ui
                            .selectable_label(selected.as_deref() == Some(name), name)
                            .clicked()
                        {
                            // Switch the video to the clicked rendition.
                            if let Ok(track) = self
                                .broadcast
                                .watch_rendition::<FfmpegVideoDecoder>(&Default::default(), name)
                            {
                                self.video = Some(VideoView::new(ctx, track));
                            }
                        }
                    }
                });
            let stats = self.stats.smoothed(|| self.session.conn().stats());
            ui.label(format!(
                "peer: {}",
                self.session.conn().remote_id().fmt_short()
            ));
            ui.label(format!("BW up: {}", stats.up.rate_str));
            ui.label(format!("BW down: {}", stats.down.rate_str));
            ui.label(format!("RTT: {}ms", stats.rtt.as_millis()));
        });
    }
}
/// Renders a decoded video track into an egui texture.
struct VideoView {
    track: WatchTrack,
    texture: egui::TextureHandle,
    // Last rendered size, used to detect viewport changes.
    size: egui::Vec2,
}
impl VideoView {
    fn new(ctx: &egui::Context, track: WatchTrack) -> Self {
        // Start with a small black placeholder until the first frame arrives.
        let size = egui::vec2(100., 100.);
        let color_image =
            egui::ColorImage::filled([size.x as usize, size.y as usize], Color32::BLACK);
        let texture = ctx.load_texture("video", color_image, egui::TextureOptions::default());
        Self {
            size,
            texture,
            track,
        }
    }
    /// Upload the latest frame (if any) and return it as an egui image.
    fn render(&mut self, ctx: &egui::Context, available_size: Vec2) -> egui::Image<'_> {
        let available_size = available_size.into();
        // Propagate viewport changes so the decoder can pick a matching
        // resolution (scaled by the pixels-per-point factor).
        if available_size != self.size {
            self.size = available_size;
            let ppp = ctx.pixels_per_point();
            let w = (available_size.x * ppp) as u32;
            let h = (available_size.y * ppp) as u32;
            self.track.set_viewport(w, h);
        }
        if let Some(frame) = self.track.current_frame() {
            let (w, h) = frame.img().dimensions();
            let image = egui::ColorImage::from_rgba_unmultiplied(
                [w as usize, h as usize],
                frame.img().as_raw(),
            );
            self.texture.set(image, Default::default());
        }
        egui::Image::from_texture(&self.texture).shrink_to_fit()
    }
}

View file

@ -0,0 +1,15 @@
mod live;
mod node;
pub mod rooms;
pub mod ticket;
pub mod util;
pub use self::live::Live;
pub use self::node::LiveNode;
pub use iroh_moq as moq;
pub use iroh_moq::ALPN;
pub use hang::catalog;
pub use moq_media as media;

View file

@ -0,0 +1,62 @@
use iroh::{Endpoint, EndpointAddr};
use iroh_moq::{Moq, MoqProtocolHandler, MoqSession};
use moq_lite::BroadcastProducer;
use moq_media::{
av::{AudioSink, Decoders, PlaybackConfig},
subscribe::{AvRemoteTrack, SubscribeBroadcast},
};
use n0_error::Result;
use tracing::info;
/// High-level entry point for iroh-live: a thin convenience wrapper around
/// the MoQ protocol instance. Cheap to clone.
#[derive(Clone)]
pub struct Live {
    pub moq: Moq,
}
impl Live {
    /// Create a new `Live` instance bound to `endpoint`.
    pub fn new(endpoint: Endpoint) -> Self {
        Self {
            moq: Moq::new(endpoint),
        }
    }

    /// Connect to a remote endpoint and open a MoQ session.
    pub async fn connect(&self, remote: impl Into<EndpointAddr>) -> Result<MoqSession> {
        self.moq.connect(remote).await
    }

    /// Connect to `remote` and subscribe to the broadcast named
    /// `broadcast_name`, returning the session and the subscription.
    pub async fn connect_and_subscribe(
        &self,
        remote: impl Into<EndpointAddr>,
        broadcast_name: &str,
    ) -> Result<(MoqSession, SubscribeBroadcast)> {
        let mut session = self.connect(remote).await?;
        info!(id=%session.conn().remote_id(), "new peer connected");
        let broadcast = session.subscribe(broadcast_name).await?;
        let broadcast = SubscribeBroadcast::new(broadcast_name.to_string(), broadcast).await?;
        Ok((session, broadcast))
    }

    /// Connect, subscribe, and start decoded video watching plus audio
    /// playback on `audio_out`.
    pub async fn watch_and_listen<D: Decoders>(
        &self,
        remote: impl Into<EndpointAddr>,
        broadcast_name: &str,
        audio_out: impl AudioSink,
        config: PlaybackConfig,
    ) -> Result<(MoqSession, AvRemoteTrack)> {
        // `broadcast_name` is already a `&str`; the previous `&broadcast_name`
        // passed a needless `&&str` borrow (clippy: needless_borrow).
        let (session, broadcast) = self.connect_and_subscribe(remote, broadcast_name).await?;
        let track = broadcast.watch_and_listen::<D>(audio_out, config)?;
        Ok((session, track))
    }

    /// Protocol handler to register on an iroh `Router` for accepting
    /// incoming MoQ connections.
    pub fn protocol_handler(&self) -> MoqProtocolHandler {
        self.moq.protocol_handler()
    }

    /// Publish `producer` under `name` for remote subscribers.
    pub async fn publish(&self, name: impl ToString, producer: BroadcastProducer) -> Result<()> {
        self.moq.publish(name, producer).await
    }

    /// Shut down the MoQ instance.
    pub fn shutdown(&self) {
        self.moq.shutdown();
    }
}

View file

@ -0,0 +1,72 @@
use crate::{
live::Live,
rooms::{Room, RoomTicket},
};
use iroh::{Endpoint, protocol::Router};
use iroh_gossip::Gossip;
use n0_error::{Result, StdResultExt};
use tracing::info;
/// Bundles an iroh router with gossip and live/MoQ, ready for rooms.
/// Cheap to clone.
#[derive(Clone)]
pub struct LiveNode {
    router: Router,
    pub live: Live,
    pub gossip: Gossip,
}
impl LiveNode {
    /// Bind an endpoint (secret key from `IROH_SECRET`, or freshly
    /// generated), spawn gossip and MoQ, and register both on a router.
    pub async fn spawn_from_env() -> Result<Self> {
        let endpoint = Endpoint::builder()
            .secret_key(secret_key_from_env()?)
            .bind()
            .await?;
        info!(endpoint_id=%endpoint.id(), "endpoint bound");
        let gossip = Gossip::builder().spawn(endpoint.clone());
        let live = Live::new(endpoint.clone());
        let router = Router::builder(endpoint)
            .accept(iroh_gossip::ALPN, gossip.clone())
            .accept(iroh_moq::ALPN, live.protocol_handler())
            .spawn();
        Ok(Self {
            router,
            gossip,
            live,
        })
    }
    /// Shut down live first, then the router.
    pub async fn shutdown(&self) -> Result<()> {
        self.live.shutdown();
        self.router.shutdown().await.anyerr()
    }
    pub fn endpoint(&self) -> &Endpoint {
        self.router.endpoint()
    }
    /// Join (or bootstrap) the room described by `ticket`.
    pub async fn join_room(&self, ticket: RoomTicket) -> Result<Room> {
        Room::new(
            self.endpoint(),
            self.gossip.clone(),
            self.live.clone(),
            ticket,
        )
        .await
    }
}
/// Load the node's secret key from the `IROH_SECRET` env var, or generate a
/// fresh one and print it so the identity can be kept stable across runs.
fn secret_key_from_env() -> n0_error::Result<iroh::SecretKey> {
    if let Ok(hex) = std::env::var("IROH_SECRET") {
        return Ok(hex.parse()?);
    }
    let key = iroh::SecretKey::generate(&mut rand::rng());
    println!(
        "Created new secret. Reuse with IROH_SECRET={}",
        data_encoding::HEXLOWER.encode(&key.to_bytes())
    );
    Ok(key)
}

View file

@ -0,0 +1,391 @@
use std::collections::HashSet;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use bytes::Bytes;
use iroh::{Endpoint, EndpointId, SecretKey};
use iroh_gossip::Gossip;
use iroh_moq::MoqSession;
use iroh_smol_kv::{ExpiryConfig, Filter, SignedValue, Subscribe, SubscribeMode, WriteScope};
use moq_lite::BroadcastProducer;
use moq_media::subscribe::SubscribeBroadcast;
use n0_error::{Result, StdResultExt, anyerr};
use n0_future::FuturesUnordered;
use n0_future::{StreamExt, task::AbortOnDropHandle};
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc::{self, error::TryRecvError};
use tracing::{Instrument, debug, error_span, warn};
use crate::Live;
pub use self::publisher::{PublishOpts, RoomPublisherSync, StreamKind};
pub use self::ticket::RoomTicket;
type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send + Sync + 'static>>;
mod publisher;
/// A joined room: a handle for issuing commands plus the event stream.
pub struct Room {
    handle: RoomHandle,
    events: mpsc::Receiver<RoomEvent>,
}
/// Receiving half of a room's event stream (see [`Room::split`]).
pub type RoomEvents = mpsc::Receiver<RoomEvent>;
/// Cloneable command handle to a room's background actor.
#[derive(Clone)]
pub struct RoomHandle {
    // Our own endpoint id; used as the bootstrap entry in issued tickets.
    me: EndpointId,
    ticket: RoomTicket,
    // Channel into the room actor.
    tx: mpsc::Sender<ApiMessage>,
    // Keeps the actor task alive; aborted when the last handle drops.
    _actor_handle: Arc<AbortOnDropHandle<()>>,
}
impl RoomHandle {
    /// Return a shareable ticket for this room with ourselves as the
    /// bootstrap node.
    pub fn ticket(&self) -> RoomTicket {
        let mut ticket = self.ticket.clone();
        ticket.bootstrap = vec![self.me];
        ticket
    }
    /// Publish a broadcast into the room under `name`.
    ///
    /// Fails only if the room actor has stopped.
    pub async fn publish(&self, name: impl ToString, producer: BroadcastProducer) -> Result<()> {
        self.tx
            .send(ApiMessage::Publish {
                name: name.to_string(),
                // was `producer: producer` (clippy: redundant_field_names)
                producer,
            })
            .await
            .map_err(|_| anyerr!("room actor died"))
    }
}
impl Room {
    /// Join the room described by `ticket`: spawn the background actor that
    /// maintains gossip-kv state and connects to announced peers.
    pub async fn new(
        endpoint: &Endpoint,
        gossip: Gossip,
        live: Live,
        ticket: RoomTicket,
    ) -> Result<Self> {
        let endpoint_id = endpoint.id();
        let (actor_tx, actor_rx) = mpsc::channel(16);
        let (event_tx, event_rx) = mpsc::channel(16);
        let actor = Actor::new(
            endpoint.secret_key(),
            live,
            event_tx,
            gossip,
            ticket.clone(),
        )
        .await?;
        let actor_task = tokio::task::spawn(
            async move { actor.run(actor_rx).await }
                .instrument(error_span!("RoomActor", id = ticket.topic_id.fmt_short())),
        );
        Ok(Self {
            handle: RoomHandle {
                ticket,
                me: endpoint_id,
                tx: actor_tx,
                _actor_handle: Arc::new(AbortOnDropHandle::new(actor_task)),
            },
            events: event_rx,
        })
    }
    /// Wait for the next room event; errors once the actor has stopped.
    pub async fn recv(&mut self) -> Result<RoomEvent> {
        self.events.recv().await.std_context("sender stopped")
    }
    /// Non-blocking variant of [`Self::recv`].
    pub fn try_recv(&mut self) -> Result<RoomEvent, TryRecvError> {
        self.events.try_recv()
    }
    pub fn ticket(&self) -> RoomTicket {
        self.handle.ticket()
    }
    /// Split into the event stream and the cloneable command handle.
    pub fn split(self) -> (RoomEvents, RoomHandle) {
        (self.events, self.handle)
    }
    /// Publish a broadcast into the room under `name`.
    pub async fn publish(&self, name: impl ToString, producer: BroadcastProducer) -> Result<()> {
        self.handle.publish(name, producer).await
    }
}
/// Commands sent from [`RoomHandle`] to the room actor.
enum ApiMessage {
    Publish {
        name: String,
        producer: BroadcastProducer,
    },
}
/// Events emitted by the room actor.
pub enum RoomEvent {
    /// A remote peer announced its broadcasts via the gossip kv store.
    RemoteAnnounced {
        remote: EndpointId,
        broadcasts: Vec<String>,
    },
    /// A MoQ session to a remote peer was established.
    RemoteConnected {
        session: MoqSession,
    },
    /// We subscribed to a remote peer's broadcast.
    BroadcastSubscribed {
        session: MoqSession,
        broadcast: SubscribeBroadcast,
    },
}
// Key under which each peer stores its state in the gossip kv store.
const PEER_STATE_KEY: &[u8] = b"s";
/// Per-peer state replicated through the gossip kv store.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PeerState {
    // Names of the broadcasts the peer currently publishes.
    broadcasts: Vec<String>,
}
// (author, key, signed value) triple as delivered by the kv subscription.
type KvEntry = (EndpointId, Bytes, SignedValue);
/// Identifies one broadcast of one peer: endpoint id plus broadcast name.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, derive_more::Display)]
#[display("{}:{}", _0.fmt_short(), _1)]
struct BroadcastId(EndpointId, String);
/// Background actor driving a room: replicates our publish list through a
/// gossip-backed kv store and subscribes to broadcasts announced by peers.
struct Actor {
    me: EndpointId,
    _gossip: Gossip,
    live: Live,
    // Broadcasts we are (or are in the process of) subscribing to.
    active_subscribe: HashSet<BroadcastId>,
    // Names of broadcasts we currently publish into the room.
    active_publish: HashSet<String>,
    // In-flight connect+subscribe attempts to remote broadcasts.
    connecting:
        FuturesUnordered<BoxFuture<(BroadcastId, Result<(MoqSession, SubscribeBroadcast)>)>>,
    // Resolves when a subscribed broadcast closes, so it can be retried.
    subscribe_closed: FuturesUnordered<BoxFuture<BroadcastId>>,
    // Resolves when one of our published broadcasts closes.
    publish_closed: FuturesUnordered<BoxFuture<String>>,
    event_tx: mpsc::Sender<RoomEvent>,
    kv: iroh_smol_kv::Client,
    kv_writer: WriteScope,
}
impl Actor {
    /// Subscribe to the room's gossip topic and set up the kv store that
    /// replicates peer state.
    async fn new(
        me: &SecretKey,
        live: Live,
        event_tx: mpsc::Sender<RoomEvent>,
        gossip: Gossip,
        ticket: RoomTicket,
    ) -> Result<Self> {
        let topic = gossip
            .subscribe(ticket.topic_id, ticket.bootstrap.clone())
            .await?;
        let kv = iroh_smol_kv::Client::local(
            topic,
            iroh_smol_kv::Config {
                anti_entropy_interval: Duration::from_secs(60),
                fast_anti_entropy_interval: Duration::from_secs(1),
                // Entries expire so state from departed peers is dropped.
                expiry: Some(ExpiryConfig {
                    check_interval: Duration::from_secs(10),
                    horizon: Duration::from_secs(60 * 2),
                }),
            },
        );
        // Sign our own kv writes with the endpoint's secret key.
        let kv_writer = kv.write(me.clone());
        Ok(Self {
            me: me.public(),
            live,
            _gossip: gossip,
            active_subscribe: Default::default(),
            active_publish: Default::default(),
            connecting: Default::default(),
            subscribe_closed: Default::default(),
            publish_closed: Default::default(),
            event_tx,
            kv,
            kv_writer,
        })
    }
    /// Main loop: react to kv updates from peers, API commands, finished
    /// connect attempts, and closed subscriptions/publications.
    pub async fn run(mut self, mut inbox: mpsc::Receiver<ApiMessage>) {
        let updates = self
            .kv
            .subscribe_with_opts(Subscribe {
                mode: SubscribeMode::Both,
                filter: Filter::ALL,
            })
            .stream();
        tokio::pin!(updates);
        loop {
            tokio::select! {
                Some(update) = updates.next() => {
                    match update {
                        Err(err) => warn!("gossip kv update failed: {err:#}"),
                        Ok(update) => self.handle_gossip_update(update).await,
                    }
                }
                msg = inbox.recv() => {
                    match msg {
                        // All handles dropped: stop the actor.
                        None => break,
                        Some(msg) => self.handle_api_message(msg).await
                    }
                }
                // The is_empty guards keep the select from polling drained
                // FuturesUnordered streams (which would yield None).
                Some((id, res)) = self.connecting.next(), if !self.connecting.is_empty() => {
                    match res {
                        Ok((session, broadcast)) => {
                            let closed_fut = broadcast.closed();
                            self.event_tx.send(RoomEvent::BroadcastSubscribed { session, broadcast }).await.ok();
                            // Track closure so the entry can be removed and
                            // a later announcement can re-subscribe.
                            self.subscribe_closed.push(Box::pin(async move {
                                closed_fut.await;
                                id
                            }))
                        }
                        Err(err) => {
                            self.active_subscribe.remove(&id);
                            warn!("Subscribing to broadcast {id} failed: {err:#}");
                        }
                    }
                }
                Some(id) = self.subscribe_closed.next(), if !self.subscribe_closed.is_empty() => {
                    debug!("broadcast closed: {id}");
                    self.active_subscribe.remove(&id);
                }
                Some(name) = self.publish_closed.next(), if !self.publish_closed.is_empty() => {
                    self.active_publish.remove(&name);
                    self.update_kv().await;
                }
            }
        }
    }
    /// Handle a command from a [`RoomHandle`].
    async fn handle_api_message(&mut self, msg: ApiMessage) {
        match msg {
            ApiMessage::Publish { name, producer } => {
                let closed = producer.consume().closed();
                self.live.publish(name.clone(), producer).await.ok();
                self.active_publish.insert(name.clone());
                // Track closure so the published name gets withdrawn.
                self.publish_closed.push(Box::pin(async move {
                    closed.await;
                    name
                }));
                // Announce the new publish list to the room.
                self.update_kv().await;
            }
        }
    }
    /// Handle a replicated kv entry: subscribe to any newly announced
    /// broadcasts of a remote peer.
    async fn handle_gossip_update(&mut self, entry: KvEntry) {
        let (remote, key, value) = entry;
        // Ignore our own writes and unrelated keys.
        if remote == self.me || &key != PEER_STATE_KEY {
            return;
        }
        let Ok(value) = postcard::from_bytes::<PeerState>(&value.value) else {
            return;
        };
        let PeerState { broadcasts } = value;
        for name in broadcasts.clone() {
            let id = BroadcastId(remote, name.clone());
            // `insert` returning false means we already track this broadcast.
            if !self.active_subscribe.insert(id.clone()) {
                continue;
            }
            let live = self.live.clone();
            self.connecting.push(Box::pin(async move {
                let session = live.connect_and_subscribe(remote, &name).await;
                (id, session)
            }));
        }
        self.event_tx
            .send(RoomEvent::RemoteAnnounced { remote, broadcasts })
            .await
            .ok();
    }
    /// Write our current publish list into the gossip kv store.
    async fn update_kv(&self) {
        let state = PeerState {
            broadcasts: self.active_publish.iter().cloned().collect(),
        };
        if let Err(err) = self
            .kv_writer
            .put(PEER_STATE_KEY, postcard::to_stdvec(&state).unwrap())
            .await
        {
            warn!("failed to update gossip kv: {err:#}");
        }
    }
}
mod ticket {
    use std::str::FromStr;

    use iroh::EndpointId;
    use iroh_gossip::TopicId;
    use n0_error::{Result, StdResultExt};
    use serde::{Deserialize, Serialize};

    /// Shareable ticket identifying a room: a gossip topic plus bootstrap peers.
    #[derive(Debug, Serialize, Deserialize, Clone, derive_more::Display)]
    #[display("{}", iroh_tickets::Ticket::serialize(self))]
    pub struct RoomTicket {
        /// Endpoints to dial to join the gossip swarm.
        pub bootstrap: Vec<EndpointId>,
        /// Gossip topic all room members subscribe to.
        pub topic_id: TopicId,
    }

    impl RoomTicket {
        /// Creates a ticket for `topic_id` with the given bootstrap endpoints.
        pub fn new(topic_id: TopicId, bootstrap: impl IntoIterator<Item = EndpointId>) -> Self {
            Self {
                bootstrap: bootstrap.into_iter().collect(),
                topic_id,
            }
        }

        /// Creates a ticket with a fresh random topic and no bootstrap peers.
        pub fn generate() -> Self {
            Self {
                bootstrap: vec![],
                topic_id: TopicId::from_bytes(rand::random()),
            }
        }

        /// Builds a ticket from the environment.
        ///
        /// `IROH_LIVE_ROOM` (a full serialized ticket) takes precedence; otherwise
        /// `IROH_LIVE_TOPIC` (hex topic id) is used; otherwise a random topic is
        /// created and printed so it can be reused.
        pub fn new_from_env() -> Result<Self> {
            if let Ok(value) = std::env::var("IROH_LIVE_ROOM") {
                value
                    .parse()
                    .std_context("failed to parse ticket from IROH_LIVE_ROOM environment variable")
            } else {
                let topic_id = match std::env::var("IROH_LIVE_TOPIC") {
                    Ok(topic) => TopicId::from_bytes(
                        data_encoding::HEXLOWER
                            .decode(topic.as_bytes())
                            .std_context("invalid hex")?
                            .as_slice()
                            .try_into()
                            .std_context("invalid length")?,
                    ),
                    Err(_) => {
                        let topic = TopicId::from_bytes(rand::random());
                        // The hint must name the variable actually read above;
                        // it previously said `IROH_TOPIC`, which is never consulted.
                        println!(
                            "Created new topic. Reuse with IROH_LIVE_TOPIC={}",
                            data_encoding::HEXLOWER.encode(topic.as_bytes())
                        );
                        topic
                    }
                };
                Ok(Self::new(topic_id, vec![]))
            }
        }
    }

    impl FromStr for RoomTicket {
        type Err = iroh_tickets::ParseError;

        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            iroh_tickets::Ticket::deserialize(s)
        }
    }

    impl iroh_tickets::Ticket for RoomTicket {
        const KIND: &'static str = "room";

        fn to_bytes(&self) -> Vec<u8> {
            postcard::to_stdvec(self).unwrap()
        }

        fn from_bytes(bytes: &[u8]) -> Result<Self, iroh_tickets::ParseError> {
            let ticket = postcard::from_bytes(bytes)?;
            Ok(ticket)
        }
    }
}

View file

@ -0,0 +1,199 @@
use std::sync::{Arc, Mutex};
use moq_lite::BroadcastProducer;
use moq_media::{
audio::AudioBackend,
av::{AudioPreset, VideoPreset},
capture::{CameraCapturer, ScreenCapturer},
ffmpeg::{H264Encoder, OpusEncoder},
publish::{AudioRenditions, PublishBroadcast, VideoRenditions},
};
use n0_error::{AnyError, Result};
use tracing::{info, warn};
use crate::rooms::RoomHandle;
/// Well-known broadcast names this publisher uses inside a room.
///
/// `strum` renders variants lowercase ("camera", "screen") for string
/// conversion in both directions.
#[derive(Debug, strum::Display, strum::EnumString)]
#[strum(serialize_all = "lowercase")]
enum Broadcasts {
    /// Camera broadcast (also carries microphone audio; see `set_audio`).
    Camera,
    /// Screen-share broadcast.
    Screen,
}
/// Identifies which local media stream an operation or error refers to.
#[derive(Debug)]
pub enum StreamKind {
    Camera,
    Screen,
    Microphone,
}
/// Which local media sources should currently be published.
#[derive(Default, Clone, Debug)]
pub struct PublishOpts {
    /// Publish camera video.
    pub camera: bool,
    /// Publish screen-share video.
    pub screen: bool,
    /// Publish microphone audio (attached to the camera broadcast).
    pub audio: bool,
}
/// Manager for publish broadcasts in a room
///
/// Synchronous version which spawns all async ops on new tokio tasks. Panics if methods are
/// not called in the context of a tokio runtime.
///
/// Why does this have sync methods? In UI land it is so much easier for the operations to be sync,
/// so this just spawns all async ops on tokio threads. Not yet sure about where this should evolve to
/// but this kept me moving for now.
pub struct RoomPublisherSync {
    // Handle to the audio driver, used to open microphone input streams.
    audio_ctx: AudioBackend,
    // Room to publish broadcasts into.
    room: RoomHandle,
    // Camera broadcast (also carries microphone audio), created lazily.
    camera: Option<Arc<Mutex<PublishBroadcast>>>,
    // Screen-share broadcast, present only while enabled.
    screen: Option<Arc<Mutex<PublishBroadcast>>>,
    // Last state applied via `set_state`/`set_*`.
    state: PublishOpts,
}
impl RoomPublisherSync {
    /// Creates a publisher for `room`; nothing is published until enabled.
    pub fn new(room: RoomHandle, audio_ctx: AudioBackend) -> Self {
        Self {
            room,
            audio_ctx,
            camera: None,
            screen: None,
            state: Default::default(),
        }
    }

    /// Applies a complete publish state in one call.
    ///
    /// Each stream is toggled independently; all failures are collected and
    /// returned together so one broken device does not block the others.
    pub fn set_state(&mut self, state: &PublishOpts) -> Result<(), Vec<(StreamKind, AnyError)>> {
        info!(new=?state, old=?self.state, "set publish state");
        let errors = [
            self.set_audio(state.audio)
                .err()
                .map(|e| (StreamKind::Microphone, e)),
            self.set_camera(state.camera)
                .err()
                .map(|e| (StreamKind::Camera, e)),
            self.set_screen(state.screen)
                .err()
                .map(|e| (StreamKind::Screen, e)),
        ]
        .into_iter()
        .flatten()
        .collect::<Vec<_>>();
        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }

    /// Currently applied publish options.
    pub fn state(&self) -> &PublishOpts {
        &self.state
    }

    /// Whether camera video is currently published.
    pub fn camera(&self) -> bool {
        self.state.camera
    }

    /// Shared handle to the camera broadcast, if it was ever created.
    pub fn camera_broadcast(&self) -> Option<Arc<Mutex<PublishBroadcast>>> {
        self.camera.clone()
    }

    /// Shared handle to the screen broadcast, if currently active.
    pub fn screen_broadcast(&self) -> Option<Arc<Mutex<PublishBroadcast>>> {
        self.screen.clone()
    }

    /// Enables or disables camera video publishing.
    ///
    /// Disabling keeps the broadcast alive (audio may still ride on it) and
    /// only clears its video renditions.
    pub fn set_camera(&mut self, enable: bool) -> Result<()> {
        if self.state.camera != enable {
            if enable {
                let camera = CameraCapturer::new()?;
                let renditions = VideoRenditions::new::<H264Encoder>(camera, VideoPreset::all());
                self.ensure_camera();
                // ensure_camera guarantees Some here.
                self.camera
                    .as_ref()
                    .unwrap()
                    .lock()
                    .unwrap()
                    .set_video(Some(renditions))?;
            } else if let Some(camera) = self.camera.as_ref() {
                camera.lock().unwrap().set_video(None)?;
            }
            self.state.camera = enable;
        }
        Ok(())
    }

    /// Whether screen-share video is currently published.
    pub fn screen(&self) -> bool {
        self.state.screen
    }

    /// Lazily creates and publishes the camera broadcast.
    fn ensure_camera(&mut self) {
        if self.camera.is_none() {
            let broadcast = PublishBroadcast::new();
            self.publish(Broadcasts::Camera, broadcast.producer());
            self.camera = Some(Arc::new(Mutex::new(broadcast)));
        }
    }

    /// Lazily creates and publishes the screen broadcast.
    ///
    /// Mirrors [`Self::ensure_camera`]; this logic was previously duplicated
    /// inline in [`Self::set_screen`].
    fn ensure_screen(&mut self) {
        if self.screen.is_none() {
            let broadcast = PublishBroadcast::new();
            self.publish(Broadcasts::Screen, broadcast.producer());
            self.screen = Some(Arc::new(Mutex::new(broadcast)));
        }
    }

    /// Publishes `producer` to the room on a background task (fire and forget;
    /// failures are only logged).
    fn publish(&self, name: Broadcasts, producer: BroadcastProducer) {
        let room = self.room.clone();
        tokio::spawn(async move {
            if let Err(err) = room.publish(name, producer).await {
                warn!("publish to room failed: {err:#}");
            }
        });
    }

    /// Enables or disables screen-share publishing.
    ///
    /// NOTE(review): unlike the camera path, disabling drops the broadcast
    /// entirely instead of calling `set_video(None)` — presumably intentional
    /// because audio never rides on the screen broadcast; confirm.
    pub fn set_screen(&mut self, enable: bool) -> Result<()> {
        if self.state.screen != enable {
            if enable {
                self.ensure_screen();
                let screen = ScreenCapturer::new()?;
                let renditions = VideoRenditions::new::<H264Encoder>(screen, VideoPreset::all());
                // ensure_screen guarantees Some here.
                self.screen
                    .as_ref()
                    .unwrap()
                    .lock()
                    .unwrap()
                    .set_video(Some(renditions))?;
            } else {
                let _ = self.screen.take();
            }
            self.state.screen = enable;
        }
        Ok(())
    }

    /// Whether microphone audio is currently published.
    pub fn audio(&self) -> bool {
        self.state.audio
    }

    /// Enables or disables microphone audio on the camera broadcast.
    ///
    /// Opening the input device happens on a background task, so enable
    /// failures are logged rather than returned.
    pub fn set_audio(&mut self, enable: bool) -> Result<()> {
        if self.state.audio != enable {
            if enable {
                self.ensure_camera();
                let camera = self.camera.as_ref().unwrap().clone();
                let audio_ctx = self.audio_ctx.clone();
                tokio::spawn(async move {
                    let mic = match audio_ctx.default_input().await {
                        Err(err) => {
                            warn!("failed to open audio input: {err:#}");
                            return;
                        }
                        Ok(mic) => mic,
                    };
                    let renditions = AudioRenditions::new::<OpusEncoder>(mic, [AudioPreset::Hq]);
                    if let Err(err) = camera.lock().unwrap().set_audio(Some(renditions)) {
                        warn!("failed to set audio: {err:#}");
                    }
                });
            } else if let Some(camera) = self.camera.as_mut() {
                camera.lock().unwrap().set_audio(None)?;
            }
            self.state.audio = enable;
        }
        Ok(())
    }
}

View file

@ -0,0 +1,61 @@
use iroh::EndpointAddr;
use n0_error::{Result, StdResultExt};
use serde::{Deserialize, Serialize};
/// Ticket for subscribing to a single broadcast on a specific endpoint.
///
/// Displays and parses as `<broadcast_name>@<base32(endpoint addr)>`.
#[derive(Debug, Clone, PartialEq, Eq, derive_more::Display, Serialize, Deserialize)]
#[display("{}", self.serialize())]
pub struct LiveTicket {
    /// Address of the endpoint serving the broadcast.
    pub endpoint: EndpointAddr,
    /// Name of the broadcast to subscribe to.
    pub broadcast_name: String,
}
impl LiveTicket {
    /// Creates a ticket for `broadcast_name` served by `endpoint`.
    pub fn new(endpoint: impl Into<EndpointAddr>, broadcast_name: impl ToString) -> Self {
        Self {
            endpoint: endpoint.into(),
            broadcast_name: broadcast_name.to_string(),
        }
    }

    /// Serializes the full ticket with postcard.
    pub fn to_bytes(&self) -> Vec<u8> {
        postcard::to_stdvec(self).unwrap()
    }

    /// Deserializes a ticket previously produced by [`Self::to_bytes`].
    pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
        let ticket = postcard::from_bytes(bytes).std_context("failed to deserialize")?;
        Ok(ticket)
    }

    /// Serialize to string as `<name>@<base32-lowercase(addr)>`.
    ///
    /// Only the encoded address is lowercased. Previously the whole string was
    /// lowercased, which silently rewrote mixed-case broadcast names and broke
    /// the serialize/deserialize round-trip (deserialize keeps the name as-is).
    pub fn serialize(&self) -> String {
        let mut out = self.broadcast_name.clone();
        out.push('@');
        let encoded =
            data_encoding::BASE32_NOPAD.encode(&postcard::to_stdvec(&self.endpoint).unwrap());
        out.push_str(&encoded.to_ascii_lowercase());
        out
    }

    /// Deserialize from a string.
    ///
    /// Splits at the *last* `@` so broadcast names containing `@` still parse:
    /// the base32 address part never contains `@`.
    pub fn deserialize(str: &str) -> Result<Self> {
        let (broadcast_name, encoded_addr) = str
            .rsplit_once('@')
            .std_context("invalid ticket: missing @")?;
        let endpoint_addr: EndpointAddr = postcard::from_bytes(
            // Case-insensitive decode: the encoded part is emitted lowercase.
            &(data_encoding::BASE32_NOPAD_NOCASE
                .decode(encoded_addr.as_bytes())
                .std_context("invalid base32")?),
        )
        .std_context("failed to parse")?;
        Ok(Self {
            broadcast_name: broadcast_name.to_string(),
            endpoint: endpoint_addr,
        })
    }
}
/// Parses strings produced by [`LiveTicket::serialize`].
impl std::str::FromStr for LiveTicket {
    type Err = n0_error::AnyError;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Self::deserialize(s)
    }
}

View file

@ -0,0 +1,84 @@
use std::time::{Duration, Instant};
use byte_unit::{Bit, UnitType};
use iroh::endpoint::ConnectionStats;
/// Spawn a named OS thread running `f`, panicking if spawning fails.
///
/// The name shows up in debuggers, profilers, and panic messages.
///
/// # Panics
/// Panics if the OS refuses to spawn the thread (e.g. resource exhaustion
/// or an invalid name containing interior NULs).
pub fn spawn_thread<F, T>(name: impl ToString, f: F) -> std::thread::JoinHandle<T>
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    let name_str = name.to_string();
    std::thread::Builder::new()
        .name(name_str.clone())
        .spawn(f)
        // unwrap_or_else keeps the failure message lazy; the previous
        // `.expect(&format!(..))` allocated the message on every call.
        .unwrap_or_else(|err| panic!("failed to spawn thread {name_str}: {err}"))
}
/// Smooths iroh connection statistics, recomputing at most once per second.
pub struct StatsSmoother {
    // Upload (udp_tx) counter and smoothed rate.
    rate_up: Rate,
    // Download (udp_rx) counter and smoothed rate.
    rate_down: Rate,
    // Instant of the last rate recomputation.
    last_update: Instant,
    // Last observed round-trip time.
    rtt: Duration,
}
impl StatsSmoother {
    /// Creates a smoother with zeroed rates and RTT.
    pub fn new() -> Self {
        Self {
            rate_up: Default::default(),
            rate_down: Default::default(),
            last_update: Instant::now(),
            rtt: Duration::from_secs(0),
        }
    }

    /// Returns smoothed stats, refreshing from `total` at most once per second.
    ///
    /// `total` is only invoked when the refresh interval has elapsed, so an
    /// expensive stats query is amortized across calls.
    pub fn smoothed(&mut self, total: impl FnOnce() -> ConnectionStats) -> SmoothedStats<'_> {
        let now = Instant::now();
        let since_last = now.duration_since(self.last_update);
        if since_last >= Duration::from_secs(1) {
            let stats = total();
            self.rate_down.update(since_last, stats.udp_rx.bytes);
            self.rate_up.update(since_last, stats.udp_tx.bytes);
            self.rtt = stats.path.rtt;
            self.last_update = now;
        }
        SmoothedStats {
            rtt: self.rtt,
            down: &self.rate_down,
            up: &self.rate_up,
        }
    }
}
/// Cumulative byte counter with a derived bit-rate and its rendered form.
#[derive(Debug, Clone, Default)]
pub struct Rate {
    /// Total bytes observed so far.
    pub total: u64,
    /// Rate in *bits* per second (`update` multiplies the byte delta by 8).
    pub rate: f32,
    /// Rate rendered as a human-readable string, e.g. "1.25 Mbit/s".
    pub rate_str: String,
}
impl Rate {
    /// Folds a new cumulative byte total into the rate.
    ///
    /// `delta_time` is the wall-clock span since the previous update; the
    /// resulting rate is expressed in bits per second (hence `* 8.0`).
    fn update(&mut self, delta_time: Duration, new_total: u64) {
        let delta_bytes = new_total.saturating_sub(self.total);
        let secs = delta_time.as_secs_f32();
        // Guard against zero elapsed time and report 0 when idle.
        let bits_per_sec = if secs > 0.0 && delta_bytes > 0 {
            (delta_bytes as f32 * 8.0) / secs
        } else {
            0.0
        };
        let unit = Bit::from_f32(bits_per_sec)
            .unwrap()
            .get_appropriate_unit(UnitType::Decimal);
        self.rate = bits_per_sec;
        self.rate_str = format!("{unit:.2}/s");
        self.total = new_total;
    }
}
/// Borrowed view of smoothed connection statistics.
pub struct SmoothedStats<'a> {
    /// Last sampled round-trip time.
    pub rtt: Duration,
    /// Download rate.
    pub down: &'a Rate,
    /// Upload rate.
    pub up: &'a Rate,
}

View file

@ -0,0 +1,19 @@
[package]
name = "iroh-moq"
version = "0.1.0"
edition = "2024"
description = "audio and video live streaming over iroh"
authors = ["Franz Heinzmann <frando@n0.computer>"]
repository = "https://github.com/n0-computer/iroh-live"
license = "MIT OR Apache-2.0"
[dependencies]
iroh = "0.96"
moq-lite = "0.10.1"
n0-error = { version = "0.1.2", features = ["anyhow"] }
n0-future = "0.3.1"
tokio = { version = "1.48.0", features = ["sync"] }
tokio-util = "0.7.17"
tracing = "0.1.41"
url = "2.5.7"
web-transport-iroh = { version = "0.1.0", path = "../web-transport-iroh" }

View file

@ -0,0 +1,446 @@
use std::{
collections::{HashMap, hash_map},
sync::Arc,
};
use iroh::{
Endpoint, EndpointAddr, EndpointId,
endpoint::{Connection, ConnectionError},
protocol::ProtocolHandler,
};
use moq_lite::{BroadcastConsumer, BroadcastProducer, OriginConsumer, OriginProducer};
use n0_error::{AnyError, Result, StdResultExt, anyerr, e, stack_error};
use n0_future::{
FuturesUnordered, StreamExt,
boxed::BoxFuture,
task::{AbortOnDropHandle, JoinSet},
};
use tokio::sync::{mpsc, oneshot};
use tokio_util::sync::CancellationToken;
use tracing::{Instrument, debug, error_span, info, instrument};
use web_transport_iroh::SessionError;
pub const ALPN: &[u8] = moq_lite::lite::ALPN.as_bytes();
/// Errors produced when establishing or driving moq sessions.
#[stack_error(derive, add_meta, from_sources)]
#[allow(private_interfaces)]
pub enum Error {
    #[error(transparent)]
    Connect(iroh::endpoint::ConnectError),
    #[error(transparent)]
    Moq(#[error(source, std_err)] moq_lite::Error),
    #[error(transparent)]
    Server(#[error(source, std_err)] web_transport_iroh::ServerError),
    #[error("internal consistency error")]
    InternalConsistencyError(#[error(source)] LiveActorDiedError),
    #[error("failed to perform request")]
    Request(#[error(source, std_err)] iroh::endpoint::WriteError),
}
/// Errors returned by [`MoqSession::subscribe`].
#[stack_error(derive, add_meta, from_sources)]
#[allow(private_interfaces)]
pub enum SubscribeError {
    #[error("track was not announced")]
    NotAnnounced,
    #[error("track was closed")]
    Closed,
    #[error("session was closed")]
    SessionClosed(#[error(source, std_err)] SessionError),
}
/// Internal marker error: the background actor task is gone, so its inbox is closed.
#[stack_error(derive)]
#[error("live actor died")]
struct LiveActorDiedError;
impl From<mpsc::error::SendError<ActorMessage>> for LiveActorDiedError {
    // A send error only happens when the actor's receiver was dropped,
    // i.e. the actor died.
    fn from(_value: mpsc::error::SendError<ActorMessage>) -> Self {
        Self
    }
}
/// Handle to the moq protocol actor.
///
/// Cheap to clone; all clones talk to the same background actor. The actor
/// task is aborted once the last handle is dropped.
#[derive(Debug, Clone)]
pub struct Moq {
    // Channel into the actor's inbox.
    tx: mpsc::Sender<ActorMessage>,
    // Cancels all sessions spawned by the actor.
    shutdown_token: CancellationToken,
    // Keeps the actor task alive; aborts it when the last clone drops.
    _actor_handle: Arc<AbortOnDropHandle<()>>,
}
impl Moq {
    /// Spawns the background actor on `endpoint` and returns a handle to it.
    pub fn new(endpoint: Endpoint) -> Self {
        let (tx, rx) = mpsc::channel(16);
        let actor = Actor::new(endpoint);
        let shutdown_token = actor.shutdown_token.clone();
        let actor_task = n0_future::task::spawn(async move {
            actor.run(rx).instrument(error_span!("LiveActor")).await
        });
        Self {
            shutdown_token,
            tx,
            _actor_handle: Arc::new(AbortOnDropHandle::new(actor_task)),
        }
    }

    /// Returns a protocol handler to register with an iroh router for [`ALPN`].
    pub fn protocol_handler(&self) -> MoqProtocolHandler {
        MoqProtocolHandler {
            tx: self.tx.clone(),
        }
    }

    /// Publishes `producer` under `name` to all current and future sessions.
    pub async fn publish(&self, name: impl ToString, producer: BroadcastProducer) -> Result<()> {
        self.tx
            .send(ActorMessage::PublishBroadcast {
                broadcast_name: name.to_string(),
                producer,
            })
            .await
            .std_context("live actor died")?;
        Ok(())
    }

    /// Returns the names of all currently published broadcasts.
    ///
    /// Returns an empty list if the actor is already gone.
    pub async fn published_broadcasts(&self) -> Vec<String> {
        let (reply, reply_rx) = oneshot::channel();
        // was `if let Err(_) = …` — clippy::redundant_pattern_matching
        if self.tx.send(ActorMessage::GetPublished { reply }).await.is_err() {
            return vec![];
        }
        reply_rx.await.unwrap_or_default()
    }

    /// Connects to `remote`, reusing an existing session if one is open.
    pub async fn connect(&self, remote: impl Into<EndpointAddr>) -> Result<MoqSession, AnyError> {
        let (reply, reply_rx) = oneshot::channel();
        self.tx
            .send(ActorMessage::Connect {
                remote: remote.into(),
                reply,
            })
            .await
            .map_err(|_| LiveActorDiedError)?;
        reply_rx
            .await
            .map_err(|_| LiveActorDiedError)?
            .map_err(|err| anyerr!(err))
    }

    /// Signals the actor and all of its sessions to shut down.
    pub fn shutdown(&self) {
        self.shutdown_token.cancel();
    }
}
/// [`ProtocolHandler`] that accepts incoming moq connections on behalf of the actor.
#[derive(Debug, Clone)]
pub struct MoqProtocolHandler {
    // Channel into the actor's inbox.
    tx: mpsc::Sender<ActorMessage>,
}
impl MoqProtocolHandler {
    /// Accepts an incoming connection: runs the server-side moq handshake and
    /// hands the resulting session to the actor for bookkeeping.
    async fn handle_connection(&self, connection: Connection) -> Result<(), Error> {
        info!(remote = %connection.remote_id().fmt_short(), "accepted");
        let session = web_transport_iroh::Session::raw(connection);
        let session = MoqSession::session_accept(session).await?;
        // A failed send means the actor task is gone.
        self.tx
            .send(ActorMessage::HandleSession { session })
            .await
            .map_err(LiveActorDiedError::from)?;
        Ok(())
    }
}
impl ProtocolHandler for MoqProtocolHandler {
    // Wraps errors into AnyError so iroh can report them uniformly.
    async fn accept(&self, connection: Connection) -> Result<(), iroh::protocol::AcceptError> {
        self.handle_connection(connection)
            .await
            .map_err(AnyError::from)?;
        Ok(())
    }
}
// TODO: resubscribing session?
// struct MoqSession2 {
// session: MoqSession,
// tx: mpsc::Sender<ActorMessage>,
// remote: EndpointAddr,
// }
// impl MoqSession2 {
// pub async fn subscribe(&mut self, name: &str) -> Result<BroadcastConsumer> {
// match self.session.subscribe(name).await {
// Ok(consumer) => return Ok(consumer),
// Err(err) => {
// warn!("first attempt to subscribe failed, retrying. reason: {err:#}");
// let (reply, reply_rx) = oneshot::channel();
// self.tx
// .send(ActorMessage::Connect {
// remote: self.remote.clone(),
// reply,
// })
// .await
// .map_err(|_| LiveActorDiedError)?;
// self.session = reply_rx
// .await
// .map_err(|_| LiveActorDiedError)?
// .map_err(|err| anyerr!(err))?;
// self.session.subscribe(name).await.map_err(Into::into)
// }
// }
// }
// }
/// A single moq session over one WebTransport (iroh) connection.
///
/// Cheap to clone; clones share the same underlying session and origins.
#[derive(Clone)]
pub struct MoqSession {
    // Underlying transport session.
    wt_session: web_transport_iroh::Session,
    // Broadcasts we offer to the remote.
    publish: OriginProducer,
    // Broadcasts announced by the remote.
    subscribe: OriginConsumer,
}
impl MoqSession {
    /// Dials `remote_addr` with the moq [`ALPN`] and performs the client-side
    /// moq handshake over the resulting connection.
    #[instrument(skip_all, fields(remote=tracing::field::Empty))]
    pub async fn connect(
        endpoint: &Endpoint,
        remote_addr: impl Into<EndpointAddr>,
    ) -> Result<Self, Error> {
        let addr = remote_addr.into();
        tracing::Span::current().record("remote", tracing::field::display(addr.id.fmt_short()));
        let connection = endpoint.connect(addr, ALPN).await?;
        let wt_session = web_transport_iroh::Session::raw(connection);
        Self::session_connect(wt_session).await
    }

    /// Runs the *client* side of the moq handshake over an established
    /// WebTransport session.
    pub async fn session_connect(wt_session: web_transport_iroh::Session) -> Result<Self, Error> {
        let publish = moq_lite::Origin::produce();
        let subscribe = moq_lite::Origin::produce();
        // We can drop the moq_lite::Session, it spawns it tasks in the background anyway.
        // If that changes and it becomes a guard, we should keep it around.
        let _moq_session =
            moq_lite::Session::connect(wt_session.clone(), publish.consumer, subscribe.producer)
                .await?;
        Ok(Self {
            publish: publish.producer,
            subscribe: subscribe.consumer,
            wt_session,
        })
    }

    /// Runs the *server* side of the moq handshake; mirrors
    /// [`Self::session_connect`] except for the accept call.
    pub async fn session_accept(wt_session: web_transport_iroh::Session) -> Result<Self, Error> {
        let publish = moq_lite::Origin::produce();
        let subscribe = moq_lite::Origin::produce();
        // We can drop the moq_lite::Session, it spawns it tasks in the background anyway.
        // If that changes and it becomes a guard, we should keep it around.
        let _moq_session =
            moq_lite::Session::accept(wt_session.clone(), publish.consumer, subscribe.producer)
                .await?;
        Ok(Self {
            publish: publish.producer,
            subscribe: subscribe.consumer,
            wt_session,
        })
    }

    /// Endpoint id of the remote peer.
    pub fn remote_id(&self) -> EndpointId {
        self.wt_session.remote_id()
    }

    /// Underlying iroh connection (e.g. for stats or close reasons).
    pub fn conn(&self) -> &iroh::endpoint::Connection {
        self.wt_session.conn()
    }

    /// Waits until the remote announces the broadcast `name` and returns a
    /// consumer for it.
    ///
    /// Returns immediately if the broadcast is already announced; otherwise
    /// waits for announcements, bailing out if the session closes first.
    pub async fn subscribe(&mut self, name: &str) -> Result<BroadcastConsumer, SubscribeError> {
        // Fail fast if the connection is already closed.
        if let Some(reason) = self.conn().close_reason() {
            return Err(SessionError::from(reason).into());
        }
        if let Some(consumer) = self.subscribe.consume_broadcast(name) {
            return Ok(consumer);
        }
        loop {
            // Race the next announcement against session shutdown.
            let res = tokio::select! {
                res = self.subscribe.announced() => res,
                reason = self.wt_session.closed() => {
                    return Err(reason.into())
                }
            };
            // `None` means the announcement stream ended without our track.
            let (path, consumer) = res.ok_or_else(|| e!(SubscribeError::NotAnnounced))?;
            debug!("peer announced broadcast: {path}");
            if path.as_str() == name {
                return consumer.ok_or_else(|| e!(SubscribeError::Closed));
            }
        }
    }

    /// Offers `broadcast` to the remote under `name`.
    pub fn publish(&self, name: String, broadcast: BroadcastConsumer) {
        self.publish.publish_broadcast(name, broadcast);
    }

    /// Closes the underlying transport session.
    pub fn close(&self, error_code: u32, reason: &[u8]) {
        self.wt_session.close(error_code, reason);
    }

    /// Resolves once the session is closed, yielding the close reason.
    pub async fn closed(&self) -> web_transport_iroh::SessionError {
        self.wt_session.closed().await
    }
}
/// Messages handled by the [`Actor`] loop.
enum ActorMessage {
    /// Adopt an accepted incoming session.
    HandleSession {
        session: MoqSession,
    },
    /// Publish a broadcast to all current and future sessions.
    PublishBroadcast {
        broadcast_name: BroadcastName,
        producer: BroadcastProducer,
    },
    /// Connect to a remote (or reuse an open session) and reply with it.
    Connect {
        remote: EndpointAddr,
        reply: oneshot::Sender<Result<MoqSession, Arc<AnyError>>>,
    },
    /// Reply with the names of all currently published broadcasts.
    GetPublished {
        reply: oneshot::Sender<Vec<BroadcastName>>,
    },
}

/// Name under which a broadcast is published.
type BroadcastName = String;
/// Background actor owning all moq sessions for one endpoint.
// Note: a leftover empty `#[derive()]` attribute was removed here; it was a no-op.
struct Actor {
    /// Local iroh endpoint used for outgoing connections.
    endpoint: Endpoint,
    /// Root token; each session watches a child token so `shutdown` reaches all.
    shutdown_token: CancellationToken,
    /// Broadcasts we currently publish, keyed by name.
    publishing: HashMap<BroadcastName, BroadcastProducer>,
    /// Resolves with the broadcast name once a published broadcast closes.
    publishing_closed_futs: FuturesUnordered<BoxFuture<BroadcastName>>,
    /// Open sessions, deduplicated per remote endpoint.
    sessions: HashMap<EndpointId, MoqSession>,
    /// One watcher task per session, resolving when the session closes.
    session_tasks: JoinSet<(EndpointId, Result<(), web_transport_iroh::SessionError>)>,
    /// Callers waiting for an in-flight connect to the keyed remote.
    pending_connects: HashMap<EndpointId, Vec<oneshot::Sender<Result<MoqSession, Arc<AnyError>>>>>,
    /// In-flight outgoing connection attempts.
    pending_connect_tasks: JoinSet<(EndpointId, Result<MoqSession, AnyError>)>,
}
impl Actor {
    /// Creates an actor with no sessions and no published broadcasts.
    pub fn new(endpoint: Endpoint) -> Self {
        Self {
            endpoint,
            shutdown_token: CancellationToken::new(),
            publishing: Default::default(),
            publishing_closed_futs: Default::default(),
            sessions: Default::default(),
            session_tasks: Default::default(),
            pending_connects: Default::default(),
            pending_connect_tasks: Default::default(),
        }
    }

    /// Main loop: processes API messages and completions of session,
    /// connect, and broadcast-close futures until `inbox` closes.
    pub async fn run(mut self, mut inbox: mpsc::Receiver<ActorMessage>) {
        loop {
            tokio::select! {
                msg = inbox.recv() => {
                    match msg {
                        // All `Moq` handles dropped: stop the actor.
                        None => break,
                        Some(msg) => self.handle_message(msg)
                    }
                }
                // The `is_empty` guards keep select! from polling empty collections.
                Some(res) = self.session_tasks.join_next(), if !self.session_tasks.is_empty() => {
                    let (endpoint_id, res) = res.expect("session task panicked");
                    info!(remote=%endpoint_id.fmt_short(), "session closed: {res:?}");
                    self.sessions.remove(&endpoint_id);
                }
                // A published broadcast finished; forget it.
                Some(name) = self.publishing_closed_futs.next(), if !self.publishing_closed_futs.is_empty() => {
                    self.publishing.remove(&name);
                }
                Some(res) = self.pending_connect_tasks.join_next(), if !self.pending_connect_tasks.is_empty() => {
                    let (endpoint_id, res) = res.expect("connect task panicked");
                    match res {
                        Ok(session) => {
                            info!(remote=%endpoint_id.fmt_short(), "connected");
                            // Also answers all pending_connects waiters for this remote.
                            self.handle_incoming_session(session);
                        }
                        Err(err) => {
                            info!(remote=%endpoint_id.fmt_short(), "connect failed: {err:#}");
                            // Fan the single error out to every waiter via Arc.
                            let replies = self.pending_connects.remove(&endpoint_id).into_iter().flatten();
                            let err = Arc::new(err);
                            for reply in replies {
                                reply.send(Err(err.clone())).ok();
                            }
                        }
                    }
                }
            }
        }
    }

    /// Dispatches one API message to its handler.
    fn handle_message(&mut self, msg: ActorMessage) {
        match msg {
            ActorMessage::HandleSession { session: msg } => self.handle_incoming_session(msg),
            ActorMessage::PublishBroadcast {
                broadcast_name: name,
                producer,
            } => self.handle_publish_broadcast(name, producer),
            ActorMessage::Connect { remote, reply } => self.handle_connect(remote, reply),
            ActorMessage::GetPublished { reply } => {
                let names = self.publishing.keys().cloned().collect();
                reply.send(names).ok();
            }
        }
    }

    /// Adopts a session (incoming or just-connected): offers all published
    /// broadcasts, registers it, answers pending connect waiters, and spawns
    /// a watcher task that resolves when the session closes.
    fn handle_incoming_session(&mut self, session: MoqSession) {
        tracing::info!("handle new incoming session");
        let remote = session.remote_id();
        for (name, producer) in self.publishing.iter() {
            session.publish(name.to_string(), producer.consume());
        }
        self.sessions.insert(remote, session.clone());
        for reply in self.pending_connects.remove(&remote).into_iter().flatten() {
            reply.send(Ok(session.clone())).ok();
        }
        let shutdown = self.shutdown_token.child_token();
        self.session_tasks.spawn(async move {
            let res = tokio::select! {
                // Actor shutdown: close the session ourselves.
                _ = shutdown.cancelled() => {
                    session.close(0u32.into(), b"cancelled");
                    Ok(())
                }
                // A locally initiated close is not an error.
                result = session.closed() => match result {
                    SessionError::ConnectionError(ConnectionError::LocallyClosed) => Ok(()),
                    err @ _ => Err(err)
                },
            };
            (remote, res)
        });
    }

    /// Publishes `producer` to all open sessions and remembers it for
    /// future sessions; tracks its closed-future for cleanup.
    fn handle_publish_broadcast(&mut self, name: BroadcastName, producer: BroadcastProducer) {
        for session in self.sessions.values_mut() {
            session
                .publish
                .publish_broadcast(name.clone(), producer.consume());
        }
        let closed = producer.consume().closed();
        self.publishing.insert(name.clone(), producer);
        self.publishing_closed_futs.push(Box::pin(async move {
            closed.await;
            name
        }));
    }

    /// Answers `reply` with an existing session, joins an in-flight connect,
    /// or starts a new connect task — exactly one connection per remote.
    fn handle_connect(
        &mut self,
        remote: EndpointAddr,
        reply: oneshot::Sender<Result<MoqSession, Arc<AnyError>>>,
    ) {
        let remote_id = remote.id;
        if let Some(session) = self.sessions.get(&remote_id) {
            reply.send(Ok(session.clone())).ok();
            return;
        }
        match self.pending_connects.entry(remote_id) {
            // A connect to this remote is already running: just queue the reply.
            hash_map::Entry::Occupied(mut entry) => {
                entry.get_mut().push(reply);
            }
            hash_map::Entry::Vacant(entry) => {
                let endpoint = self.endpoint.clone();
                self.pending_connect_tasks.spawn(async move {
                    let res = MoqSession::connect(&endpoint, remote)
                        .await
                        .map_err(Into::into);
                    (remote_id, res)
                });
                entry.insert(Default::default()).push(reply);
            }
        }
    }
}

View file

@ -0,0 +1,88 @@
[package]
name = "moq-media"
version = "0.1.0"
edition = "2024"
description = "native audio and video capturing, playback, encoding, decoding"
authors = ["Franz Heinzmann <frando@n0.computer>"]
repository = "https://github.com/n0-computer/iroh-live"
license = "MIT OR Apache-2.0"
[dependencies]
anyhow = "1.0.100"
bytemuck = "1.24.0"
byte-unit = { version = "5.1", features = ["bit"] }
data-encoding = "2.9.0"
derive_more = { version = "2.0.1", features = ["display", "debug", "eq"] }
ffmpeg-next = { version = "8.0.0", default-features = false, features = ["device", "format", "filter", "software-resampling", "software-scaling"] }
ffmpeg-sys-next = { version = "8.0.1", optional = true }
firewheel = { version = "0.9.1", features = ["cpal", "peak_meter_node", "std", "stream_nodes", "cpal_resample_inputs"] }
hang = "0.9.0"
image = { version = "0.25.8", default-features = false }
moq-lite = "0.10.1"
n0-error = { version = "0.1.2", features = ["anyhow"] }
n0-future = "0.3.1"
n0-watcher = "0.6.0"
nokhwa = { version = "0.10", features = [
"input-native",
"input-v4l",
"output-threaded",
] }
postcard = "1.1.3"
rand = "0.9.2"
serde = { version = "1.0.228", features = ["derive"] }
strum = { version = "0.27", features = ["derive"] }
tokio = { version = "1.48.0", features = ["sync"] }
tokio-util = "0.7.17"
tracing = "0.1.41"
xcap = "0.8"
webrtc-audio-processing = { version = "0.5.0", features = ["bundled"] }
bytes = "1.11.0"
buf-list = "1.1.2"
[dev-dependencies]
clap = { version = "4.5", features = ["derive"] }
eframe = "0.33.0"
postcard = "1.1.3"
tokio = { version = "1.48.0", features = ["full"] }
tracing-subscriber = "0.3.20"
[features]
default = []
# Enable static build of ffmpeg
static = [
"ffmpeg-next/static",
"ffmpeg-next/build-lib-openssl",
"ffmpeg-next/build-license-version3",
"ffmpeg-next/build-lib-opus",
"ffmpeg-next/build-lib-x264",
"ffmpeg-next/build-license-gpl",
"dep:ffmpeg-sys-next",
]
[target.'cfg(target_os = "macos")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
"build-videotoolbox",
"build-audiotoolbox",
] }
[target.'cfg(target_os = "linux")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
"build-vaapi",
# "build-vulkan",
# "build-lib-libmfx",
] }
[target.'cfg(target_os = "windows")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
"build-lib-d3d11va",
"build-lib-dxva2",
# "build-nvidia",
# "build-amf",
] }
[target.'cfg(target_os = "android")'.dependencies]
ffmpeg-sys-next = { version = "8.0.1", optional = true, features = [
# "build-mediacodec",
] }

View file

@ -0,0 +1,527 @@
use std::{
collections::HashMap,
sync::{
Arc, Mutex,
atomic::{AtomicBool, Ordering},
},
time::{Duration, Instant},
};
use anyhow::{Context, Result};
use firewheel::{
CpalConfig, CpalInputConfig, CpalOutputConfig, FirewheelConfig, FirewheelContext,
channel_config::{ChannelConfig, ChannelCount, NonZeroChannelCount},
dsp::volume::{DEFAULT_DB_EPSILON, DbMeterNormalizer},
graph::PortIdx,
node::NodeID,
nodes::{
peak_meter::{PeakMeterNode, PeakMeterSmoother, PeakMeterState},
stream::{
ResamplingChannelConfig,
reader::{StreamReaderConfig, StreamReaderNode, StreamReaderState},
writer::{StreamWriterConfig, StreamWriterNode, StreamWriterState},
},
},
};
use tokio::sync::{mpsc, mpsc::error::TryRecvError, oneshot};
use tracing::{debug, error, info, trace, warn};
use self::aec::{AecCaptureNode, AecProcessor, AecProcessorConfig, AecRenderNode};
use crate::{
av::{AudioFormat, AudioSink, AudioSinkHandle, AudioSource},
util::spawn_thread,
};
mod aec;
/// Shared handle to a firewheel stream-writer node state (audio output).
type StreamWriterHandle = Arc<Mutex<StreamWriterState>>;
/// Shared handle to a firewheel stream-reader node state (audio input).
type StreamReaderHandle = Arc<Mutex<StreamReaderState>>;
/// Handle to the audio driver thread; cheap to clone.
#[derive(Debug, Clone)]
pub struct AudioBackend {
    // Channel into the driver thread's inbox.
    tx: mpsc::Sender<DriverMessage>,
}
impl AudioBackend {
    /// Starts the audio driver on a dedicated OS thread and returns a handle.
    pub fn new() -> Self {
        let (tx, rx) = mpsc::channel(32);
        let _handle = spawn_thread("audiodriver", move || AudioDriver::new(rx).run());
        Self { tx }
    }

    /// Opens a mono 48 kHz input stream (the default microphone format).
    pub async fn default_input(&self) -> Result<InputStream> {
        self.input(AudioFormat::mono_48k()).await
    }

    /// Opens an input stream with the given format.
    pub async fn input(&self, format: AudioFormat) -> Result<InputStream> {
        let (reply_tx, reply_rx) = oneshot::channel();
        let request = DriverMessage::InputStream {
            format,
            reply: reply_tx,
        };
        self.tx.send(request).await?;
        let handle = reply_rx.await??;
        Ok(InputStream { handle, format })
    }

    /// Opens a stereo 48 kHz output stream.
    pub async fn default_output(&self) -> Result<OutputStream> {
        self.output(AudioFormat::stereo_48k()).await
    }

    /// Opens an output stream with the given format.
    pub async fn output(&self, format: AudioFormat) -> Result<OutputStream> {
        let (reply_tx, reply_rx) = oneshot::channel();
        let request = DriverMessage::OutputStream {
            format,
            reply: reply_tx,
        };
        self.tx.send(request).await?;
        let stream = reply_rx.await??;
        Ok(stream)
    }
}
/// Playback stream backed by a firewheel stream-writer node.
#[derive(Clone)]
pub struct OutputStream {
    // Shared state of the stream-writer node.
    handle: StreamWriterHandle,
    // Pause flag mirrored by all clones.
    paused: Arc<AtomicBool>,
    // Smoothed output peaks for metering.
    peaks: Arc<Mutex<PeakMeterSmoother<2>>>,
    // Maps dB peaks to a normalized meter value.
    normalizer: DbMeterNormalizer,
}
impl AudioSinkHandle for OutputStream {
    /// Whether playback is currently paused.
    fn is_paused(&self) -> bool {
        self.paused.load(Ordering::Relaxed)
    }

    /// Pauses playback.
    fn pause(&self) {
        self.paused.store(true, Ordering::Relaxed);
        self.handle.lock().expect("poisoned").pause_stream();
    }

    /// Resumes playback.
    fn resume(&self) {
        self.paused.store(false, Ordering::Relaxed);
        self.handle.lock().expect("poisoned").resume();
    }

    /// Flips the pause flag atomically and applies the new state.
    fn toggle_pause(&self) {
        // fetch_xor returns the *previous* value of the flag.
        let previously_paused = self.paused.fetch_xor(true, Ordering::Relaxed);
        let mut node = self.handle.lock().expect("poisoned");
        if previously_paused {
            node.resume();
        } else {
            node.pause_stream();
        }
    }

    /// Current smoothed output peak, normalized for level metering.
    fn smoothed_peak_normalized(&self) -> Option<f32> {
        let peaks = self.peaks.lock().expect("poisoned");
        Some(peaks.smoothed_peaks_normalized_mono(&self.normalizer))
    }
}
impl AudioSink for OutputStream {
    fn handle(&self) -> Box<dyn AudioSinkHandle> {
        Box::new(self.clone())
    }

    /// Returns the negotiated stream format, failing if the node does not
    /// report a sample rate yet.
    fn format(&self) -> Result<AudioFormat> {
        let info = self.handle.lock().expect("poisoned");
        let sample_rate = info
            .sample_rate()
            .context("output stream misses sample rate")?
            .get();
        let channel_count = info.num_channels().get().get();
        Ok(AudioFormat {
            sample_rate,
            channel_count,
        })
    }

    /// Pushes interleaved f32 samples into the output node.
    ///
    /// Under-/overflows and an inactive node are logged, not returned as
    /// errors — playback is best-effort.
    fn push_samples(&mut self, samples: &[f32]) -> Result<()> {
        let mut handle = self.handle.lock().unwrap();
        // If this happens excessively in Release mode, you may want to consider
        // increasing [`StreamWriterConfig::channel_config.latency_seconds`].
        if handle.underflow_occurred() {
            // typo fix: "occured" -> "occurred"
            warn!("Underflow occurred in stream writer node!");
        }
        // If this happens excessively in Release mode, you may want to consider
        // increasing [`StreamWriterConfig::channel_config.capacity_seconds`]. For
        // example, if you are streaming data from a network, you may want to
        // increase the capacity to several seconds.
        if handle.overflow_occurred() {
            warn!("Overflow occurred in stream writer node!");
        }
        // Only push while the node's processor is ready to receive data.
        if handle.is_ready() {
            handle.push_interleaved(samples);
            trace!("pushed samples {}", samples.len());
        } else {
            warn!("output handle is inactive")
        }
        Ok(())
    }
}
impl OutputStream {
    /// Whether the underlying stream-writer node is currently active.
    #[allow(unused)]
    pub fn is_active(&self) -> bool {
        let node = self.handle.lock().expect("poisoned");
        node.is_active()
    }
}
/// An [`AudioSource`] reading interleaved samples from the input graph
/// (default microphone) via a firewheel stream-reader node.
#[derive(Clone)]
pub struct InputStream {
    // Shared state of the stream-reader node.
    handle: StreamReaderHandle,
    // Format this stream was opened with.
    format: AudioFormat,
}
impl AudioSource for InputStream {
    fn cloned_boxed(&self) -> Box<dyn AudioSource> {
        Box::new(self.clone())
    }

    fn format(&self) -> AudioFormat {
        self.format
    }

    /// Reads interleaved samples into `buf`.
    ///
    /// Returns `Ok(Some(buf.len()))` even on underflow or not-ready (the
    /// buffer then contains whatever the node produced) so callers keep a
    /// steady pacing; returns `Ok(None)` only when the stream is inactive.
    fn pop_samples(&mut self, buf: &mut [f32]) -> Result<Option<usize>> {
        use firewheel::nodes::stream::ReadStatus;
        let mut handle = self.handle.lock().expect("poisoned");
        match handle.read_interleaved(buf) {
            Some(ReadStatus::Ok) => Ok(Some(buf.len())),
            Some(ReadStatus::InputNotReady) => {
                tracing::warn!("audio input not ready");
                // Maintain pacing; still return a frame-sized buffer
                Ok(Some(buf.len()))
            }
            Some(ReadStatus::UnderflowOccurred { num_frames_read }) => {
                tracing::warn!(
                    "audio input underflow: {} frames missing",
                    buf.len() - num_frames_read
                );
                Ok(Some(buf.len()))
            }
            Some(ReadStatus::OverflowCorrected {
                num_frames_discarded,
            }) => {
                tracing::warn!("audio input overflow: {num_frames_discarded} frames discarded");
                Ok(Some(buf.len()))
            }
            None => {
                tracing::warn!("audio input stream is inactive");
                Ok(None)
            }
        }
    }
}
/// Requests handled by the [`AudioDriver`] thread.
#[derive(derive_more::Debug)]
enum DriverMessage {
    /// Open an output (playback) stream with the given format.
    OutputStream {
        format: AudioFormat,
        #[debug("Sender")]
        reply: oneshot::Sender<Result<OutputStream>>,
    },
    /// Open an input (capture) stream with the given format.
    InputStream {
        format: AudioFormat,
        #[debug("Sender")]
        reply: oneshot::Sender<Result<StreamReaderHandle>>,
    },
}
/// Owns the firewheel audio graph; runs on a dedicated OS thread.
struct AudioDriver {
    // Firewheel audio graph context.
    cx: FirewheelContext,
    // Inbox for stream-open requests.
    rx: mpsc::Receiver<DriverMessage>,
    // Echo-cancellation processor shared by the render and capture nodes.
    aec_processor: AecProcessor,
    // Node feeding echo-cancelled playback to the graph output.
    aec_render_node: NodeID,
    // Node receiving the graph input for echo cancellation.
    aec_capture_node: NodeID,
    // Per-node peak-meter smoothers for level metering.
    peak_meters: HashMap<NodeID, Arc<Mutex<PeakMeterSmoother<2>>>>,
}
impl AudioDriver {
    /// Builds the audio graph: starts the cpal stream and wires the graph
    /// input through the AEC capture node, and the AEC render node to the
    /// graph output.
    ///
    /// Panics if the audio stream cannot be started or the AEC processor
    /// fails to initialize — the driver is useless without them.
    fn new(rx: mpsc::Receiver<DriverMessage>) -> Self {
        let config = FirewheelConfig {
            num_graph_inputs: ChannelCount::new(1).unwrap(),
            ..Default::default()
        };
        let mut cx = FirewheelContext::new(config);
        info!("inputs: {:?}", cx.available_input_devices());
        info!("outputs: {:?}", cx.available_output_devices());
        let config = CpalConfig {
            output: CpalOutputConfig {
                // NOTE(review): hard-codes the "pipewire" device on Linux —
                // presumably fails on systems without pipewire; confirm.
                #[cfg(target_os = "linux")]
                device_name: Some("pipewire".to_string()),
                ..Default::default()
            },
            input: Some(CpalInputConfig {
                #[cfg(target_os = "linux")]
                device_name: Some("pipewire".to_string()),
                fail_on_no_input: true,
                ..Default::default()
            }),
        };
        cx.start_stream(config).unwrap();
        info!(
            "audio graph in: {:?}",
            cx.node_info(cx.graph_in_node_id()).map(|x| &x.info)
        );
        info!(
            "audio graph out: {:?}",
            cx.node_info(cx.graph_out_node_id()).map(|x| &x.info)
        );
        cx.set_graph_channel_config(ChannelConfig {
            num_inputs: ChannelCount::new(2).unwrap(),
            num_outputs: ChannelCount::new(2).unwrap(),
        });
        let aec_processor = AecProcessor::new(AecProcessorConfig::stereo_in_out(), true)
            .expect("failed to initialize AEC processor");
        let aec_render_node = cx.add_node(AecRenderNode::default(), Some(aec_processor.clone()));
        let aec_capture_node = cx.add_node(AecCaptureNode::default(), Some(aec_processor.clone()));
        // Connect channel 0 -> 0 and 1 -> 1 on both edges of the AEC pair.
        let layout = &[(0, 0), (1, 1)];
        cx.connect(cx.graph_in_node_id(), aec_capture_node, layout, true)
            .unwrap();
        cx.connect(aec_render_node, cx.graph_out_node_id(), layout, true)
            .unwrap();
        Self {
            cx,
            rx,
            aec_processor,
            aec_render_node,
            aec_capture_node,
            peak_meters: Default::default(),
        }
    }
fn run(&mut self) {
const INTERVAL: Duration = Duration::from_millis(10);
const PEAK_UPDATE_INTERVAL: Duration = Duration::from_millis(40);
let mut last_delay: f64 = 0.;
let mut last_peak_update = Instant::now();
loop {
let tick = Instant::now();
if self.drain_messages().is_err() {
info!("closing audio driver: message channel closed");
break;
}
if let Err(e) = self.cx.update() {
error!("audio backend error: {:?}", &e);
// if let UpdateError::StreamStoppedUnexpectedly(_) = e {
// // Notify the stream node handles that the output stream has stopped.
// // This will automatically stop any active streams on the nodes.
// cx.node_state_mut::<StreamWriterState>(stream_writer_id)
// .unwrap()
// .stop_stream();
// cx.node_state_mut::<StreamReaderState>(stream_reader_id)
// .unwrap()
// .stop_stream();
// // The stream has stopped unexpectedly (i.e the user has
// // unplugged their headphones.)
// //
// // Typically you should start a new stream as soon as
// // possible to resume processing (event if it's a dummy
// // output device).
// //
// // In this example we just quit the application.
// break;
// }
}
if let Some(info) = self.cx.stream_info() {
let delay = info.input_to_output_latency_seconds;
if (last_delay - delay).abs() > (1. / 1000.) {
let delay_ms = (delay * 1000.) as u32;
info!("update processor delay to {delay_ms}ms");
self.aec_processor.set_stream_delay(delay_ms);
last_delay = delay;
}
}
// Update peak meters
let delta = last_peak_update.elapsed();
if delta > PEAK_UPDATE_INTERVAL {
for (id, smoother) in self.peak_meters.iter_mut() {
smoother.lock().expect("poisoned").update(
self.cx
.node_state::<PeakMeterState<2>>(*id)
.unwrap()
.peak_gain_db(DEFAULT_DB_EPSILON),
delta.as_secs_f32(),
);
}
last_peak_update = Instant::now();
}
std::thread::sleep(INTERVAL.saturating_sub(tick.elapsed()));
}
}
fn drain_messages(&mut self) -> Result<(), ()> {
loop {
match self.rx.try_recv() {
Err(TryRecvError::Disconnected) => {
info!("stopping audio thread: backend handle dropped");
break Err(());
}
Err(TryRecvError::Empty) => {
break Ok(());
}
Ok(message) => self.handle_message(message),
}
}
}
fn handle_message(&mut self, message: DriverMessage) {
debug!("handle {message:?}");
match message {
DriverMessage::OutputStream { format, reply } => {
let res = self
.output_stream(format)
.inspect_err(|err| warn!("failed to create audio output stream: {err:#}"));
reply.send(res).ok();
}
DriverMessage::InputStream { format, reply } => {
let res = self
.input_stream(format)
.inspect_err(|err| warn!("failed to create audio input stream: {err:#}"));
reply.send(res).ok();
}
}
}
fn output_stream(&mut self, format: AudioFormat) -> Result<OutputStream> {
let channel_count = format.channel_count;
let sample_rate = format.sample_rate;
// setup stream
let stream_writer_id = self.cx.add_node(
StreamWriterNode,
Some(StreamWriterConfig {
channels: NonZeroChannelCount::new(channel_count)
.context("channel count may not be zero")?,
..Default::default()
}),
);
let graph_out = self.aec_render_node;
// let graph_out_info = self
// .cx
// .node_info(graph_out)
// .context("missing audio output node")?;
let peak_meter_node = PeakMeterNode::<2> { enabled: true };
let peak_meter_id = self.cx.add_node(peak_meter_node.clone(), None);
let peak_meter_smoother =
Arc::new(Mutex::new(PeakMeterSmoother::<2>::new(Default::default())));
self.peak_meters
.insert(peak_meter_id, peak_meter_smoother.clone());
self.cx
.connect(peak_meter_id, graph_out, &[(0, 0), (1, 1)], true)
.unwrap();
let layout: &[(PortIdx, PortIdx)] = match channel_count {
0 => anyhow::bail!("audio stream has no channels"),
1 => &[(0, 0), (0, 1)],
_ => &[(0, 0), (1, 1)],
};
self.cx
.connect(stream_writer_id, peak_meter_id, layout, false)
.unwrap();
let output_stream_sample_rate = self.cx.stream_info().unwrap().sample_rate;
let event = self
.cx
.node_state_mut::<StreamWriterState>(stream_writer_id)
.unwrap()
.start_stream(
sample_rate.try_into().unwrap(),
output_stream_sample_rate,
ResamplingChannelConfig {
capacity_seconds: 3.,
..Default::default()
},
)
.unwrap();
info!("started output stream");
self.cx.queue_event_for(stream_writer_id, event.into());
// Wrap the handles in an `Arc<Mutex<T>>>` so that we can send them to other threads.
let handle = self
.cx
.node_state::<StreamWriterState>(stream_writer_id)
.unwrap()
.handle();
Ok(OutputStream {
handle: Arc::new(handle),
paused: Arc::new(AtomicBool::new(false)),
peaks: peak_meter_smoother,
normalizer: DbMeterNormalizer::new(-60., 0., -20.),
})
}
fn input_stream(&mut self, format: AudioFormat) -> Result<StreamReaderHandle> {
let sample_rate = format.sample_rate;
let channel_count = format.channel_count;
// Setup stream reader node
let stream_reader_id = self.cx.add_node(
StreamReaderNode,
Some(StreamReaderConfig {
channels: NonZeroChannelCount::new(channel_count)
.context("channel count may not be zero")?,
..Default::default()
}),
);
let graph_in_node_id = self.aec_capture_node;
let graph_in_info = self
.cx
.node_info(graph_in_node_id)
.context("missing audio input node")?;
let layout: &[(PortIdx, PortIdx)] = match (
graph_in_info.info.channel_config.num_outputs.get(),
channel_count,
) {
(0, _) => anyhow::bail!("audio input has no channels"),
(1, 2) => &[(0, 0), (0, 1)],
(2, 2) => &[(0, 0), (1, 1)],
(_, 1) => &[(0, 0)],
_ => &[(0, 0), (1, 1)],
};
self.cx
.connect(graph_in_node_id, stream_reader_id, layout, false)
.unwrap();
let input_stream_sample_rate = self.cx.stream_info().unwrap().sample_rate;
let event = self
.cx
.node_state_mut::<StreamReaderState>(stream_reader_id)
.unwrap()
.start_stream(
sample_rate.try_into().unwrap(),
input_stream_sample_rate,
ResamplingChannelConfig {
capacity_seconds: 3.0,
..Default::default()
},
)
.unwrap();
self.cx.queue_event_for(stream_reader_id, event.into());
let handle = self
.cx
.node_state::<StreamReaderState>(stream_reader_id)
.unwrap()
.handle();
Ok(Arc::new(handle))
}
}

View file

@ -0,0 +1,452 @@
pub use self::{
firewheel_nodes::{AecCaptureNode, AecRenderNode},
processor::{AecProcessor, AecProcessorConfig},
};
mod processor {
    //! Thread-safe wrapper around the WebRTC audio processor used for
    //! acoustic echo cancellation (AEC).
    use std::{
        num::NonZeroU32,
        sync::{
            Arc, Mutex,
            atomic::{AtomicBool, Ordering},
        },
    };
    use anyhow::Result;
    use tracing::{debug, info};
    use webrtc_audio_processing::{
        Config, EchoCancellation, EchoCancellationSuppressionLevel, InitializationConfig,
    };
    /// Channel counts for the processor's capture (input) and render (output)
    /// streams.
    #[derive(Debug, Clone)]
    pub struct AecProcessorConfig {
        pub num_input_channels: NonZeroU32,
        pub num_output_channels: NonZeroU32,
    }
    impl Default for AecProcessorConfig {
        // Default is stereo on both sides.
        fn default() -> Self {
            Self {
                num_input_channels: 2.try_into().unwrap(),
                num_output_channels: 2.try_into().unwrap(),
            }
        }
    }
    impl AecProcessorConfig {
        /// Stereo capture and render; alias for [`Default::default`].
        pub fn stereo_in_out() -> Self {
            Self::default()
        }
    }
    /// Cheaply cloneable handle to a shared WebRTC audio processor.
    ///
    /// All clones share the same underlying processor state (echo canceller,
    /// config, enabled flag).
    #[derive(Clone, Debug)]
    pub struct AecProcessor(Arc<Inner>);
    #[derive(derive_more::Debug)]
    struct Inner {
        #[debug("Processor")]
        processor: Mutex<webrtc_audio_processing::Processor>,
        // Kept separately so `set_stream_delay` can mutate and re-apply it.
        config: Mutex<Config>,
        // capture_delay: AtomicU64,
        // playback_delay: AtomicU64,
        // When false, process_* calls become no-ops (bypass).
        enabled: AtomicBool,
        // capture_channels: AtomicUsize,
        // playback_channels: AtomicUsize,
    }
    impl Default for AecProcessor {
        fn default() -> Self {
            Self::new(Default::default(), true).expect("failed to initialize AecProcessor")
        }
    }
    impl AecProcessor {
        /// Creates a processor with echo cancellation (high suppression,
        /// delay-agnostic, extended filter) and a high-pass filter enabled.
        pub fn new(config: AecProcessorConfig, enabled: bool) -> anyhow::Result<Self> {
            let suppression_level = EchoCancellationSuppressionLevel::High;
            // High pass filter is a prerequisite to running echo cancellation.
            let processor_config = Config {
                echo_cancellation: Some(EchoCancellation {
                    suppression_level,
                    stream_delay_ms: None,
                    enable_delay_agnostic: true,
                    enable_extended_filter: true,
                }),
                enable_high_pass_filter: true,
                ..Config::default()
            };
            let mut processor = webrtc_audio_processing::Processor::new(&InitializationConfig {
                num_capture_channels: config.num_input_channels.get() as i32,
                num_render_channels: config.num_output_channels.get() as i32,
                enable_experimental_agc: true,
                enable_intelligibility_enhancer: true, // ..InitializationConfig::default()
            })?;
            processor.set_config(processor_config.clone());
            // processor.set_config(config.clone());
            info!("init audio processor (config={config:?})");
            Ok(Self(Arc::new(Inner {
                processor: Mutex::new(processor),
                config: Mutex::new(processor_config),
                enabled: AtomicBool::new(enabled),
            })))
        }
        /// Whether processing is currently applied (false = bypass).
        pub fn is_enabled(&self) -> bool {
            self.0.enabled.load(Ordering::SeqCst)
        }
        #[allow(unused)]
        pub fn set_enabled(&self, enabled: bool) {
            let _prev = self.0.enabled.swap(enabled, Ordering::SeqCst);
        }
        /// Processes and modifies the audio frame from a capture device by applying
        /// signal processing as specified in the config. `frame` should hold an
        /// interleaved f32 audio frame, with [`NUM_SAMPLES_PER_FRAME`] samples.
        // webrtc-audio-processing expects a 10ms chunk for each process call.
        pub fn process_capture_frame(
            &self,
            frame: &mut [f32],
        ) -> Result<(), webrtc_audio_processing::Error> {
            if !self.is_enabled() {
                return Ok(());
            }
            self.0
                .processor
                .lock()
                .expect("poisoned")
                .process_capture_frame(frame)
        }
        /// Processes and optionally modifies the audio frame from a playback device.
        /// `frame` should hold an interleaved `f32` audio frame, with
        /// [`NUM_SAMPLES_PER_FRAME`] samples.
        pub fn process_render_frame(
            &self,
            frame: &mut [f32],
        ) -> Result<(), webrtc_audio_processing::Error> {
            if !self.is_enabled() {
                return Ok(());
            }
            self.0
                .processor
                .lock()
                .expect("poisoned")
                .process_render_frame(frame)
        }
        /// Updates the echo canceller's assumed render-to-capture delay and
        /// re-applies the full config to the processor.
        pub fn set_stream_delay(&self, delay_ms: u32) {
            debug!("updating stream delay to {delay_ms}ms");
            // let playback = self.0.playback_delay.load(Ordering::Relaxed);
            // let capture = self.0.capture_delay.load(Ordering::Relaxed);
            // let total = playback + capture;
            // Config lock is held across the processor lock; both are only
            // taken in this order, so no deadlock is possible here.
            let mut config = self.0.config.lock().expect("poisoned");
            config.echo_cancellation.as_mut().unwrap().stream_delay_ms = Some(delay_ms as i32);
            self.0
                .processor
                .lock()
                .expect("poisoned")
                .set_config(config.clone());
        }
    }
}
mod firewheel_nodes {
    //! Firewheel graph nodes that feed audio through the shared
    //! [`AecProcessor`] in fixed 10ms chunks, buffering across audio blocks
    //! whose size does not divide the WebRTC frame size.
    use std::collections::VecDeque;
    use firewheel::{
        StreamInfo,
        channel_config::{ChannelConfig, ChannelCount},
        diff::{Diff, Patch},
        event::ProcEvents,
        node::{
            AudioNode, AudioNodeInfo, AudioNodeProcessor, ConstructProcessorContext, ProcBuffers,
            ProcExtra, ProcInfo, ProcStreamCtx, ProcessStatus,
        },
    };
    use webrtc_audio_processing::NUM_SAMPLES_PER_FRAME;
    use super::AecProcessor;
    // Both nodes are hard-wired to stereo.
    const CHANNELS: usize = 2;
    // Interleaved sample count of one 10ms WebRTC frame.
    const FRAME_SAMPLES: usize = (NUM_SAMPLES_PER_FRAME as usize) * CHANNELS;
    /// Simple render-side node: feeds output audio into WebRTC's render stream.
    #[derive(Diff, Patch, Debug, Clone, Copy, PartialEq)]
    pub struct AecRenderNode {
        pub enabled: bool,
    }
    impl Default for AecRenderNode {
        fn default() -> Self {
            Self { enabled: true }
        }
    }
    impl AudioNode for AecRenderNode {
        /// We use the wrapped WebRTC processor as our configuration object.
        ///
        /// Note: `WebrtcAudioProcessor` already internally wraps an `Arc<Inner>`,
        /// so cloning this config shares the underlying processor between nodes.
        type Configuration = AecProcessor;
        fn info(&self, _config: &Self::Configuration) -> AudioNodeInfo {
            AudioNodeInfo::new()
                .debug_name("webrtc_render")
                .channel_config(ChannelConfig {
                    num_inputs: ChannelCount::STEREO,
                    num_outputs: ChannelCount::STEREO,
                })
        }
        fn construct_processor(
            &self,
            config: &Self::Configuration,
            _cx: ConstructProcessorContext,
        ) -> impl AudioNodeProcessor {
            // Clone = share the same underlying Arc<Inner>.
            let webrtc = config.clone();
            // Inform the processor how many playback channels we have.
            // (You can handle errors here instead of unwrap() in real code.)
            // webrtc.init_playback(CHANNELS).ok();
            RenderProcessor {
                enabled: self.enabled,
                processor: webrtc,
                in_ring: VecDeque::with_capacity(FRAME_SAMPLES * 4),
                out_ring: VecDeque::with_capacity(FRAME_SAMPLES * 4),
                tmp_chunk: vec![0.0; FRAME_SAMPLES],
            }
        }
    }
    /// Real-time processor for [`AecRenderNode`].
    struct RenderProcessor {
        enabled: bool,
        processor: AecProcessor,
        // Interleaved input samples to be fed into WebRTC in 10ms chunks.
        in_ring: VecDeque<f32>,
        // Interleaved processed samples coming back from WebRTC.
        out_ring: VecDeque<f32>,
        // Scratch buffer for one NUM_SAMPLES_PER_FRAME chunk (interleaved).
        tmp_chunk: Vec<f32>,
    }
    impl AudioNodeProcessor for RenderProcessor {
        // Buffers the incoming block, runs complete 10ms frames through the
        // AEC render path, and emits processed samples (silence while the
        // pipeline is still filling).
        fn process(
            &mut self,
            info: &ProcInfo,
            buffers: ProcBuffers,
            events: &mut ProcEvents,
            _extra: &mut ProcExtra,
        ) -> ProcessStatus {
            // Handle parameter patches.
            for patch in events.drain_patches::<AecRenderNode>() {
                match patch {
                    AecRenderNodePatch::Enabled(enabled) => {
                        self.enabled = enabled;
                        if !self.enabled {
                            // Clear any buffered state when disabling to avoid stale audio.
                            self.in_ring.clear();
                            self.out_ring.clear();
                        }
                    }
                }
            }
            let num_frames = info.frames as usize;
            // println!("num_frames: {num_frames}");
            // Get input/output slices like in the FilterNode example.
            let in_l = &buffers.inputs[0][..num_frames];
            let in_r = &buffers.inputs[1][..num_frames];
            let (out_l, out_rest) = buffers.outputs.split_first_mut().unwrap();
            let out_l = &mut out_l[..num_frames];
            let out_r = &mut out_rest[0][..num_frames];
            // If disabled, just pass through.
            if !self.enabled {
                out_l.copy_from_slice(in_l);
                out_r.copy_from_slice(in_r);
                return ProcessStatus::OutputsModified;
            }
            // 1. Push current block into the interleaved input ring buffer.
            for i in 0..num_frames {
                self.in_ring.push_back(in_l[i]);
                self.in_ring.push_back(in_r[i]);
            }
            // 2. While we have at least one full 10ms frame, process it.
            while self.in_ring.len() >= FRAME_SAMPLES {
                // Fill tmp_chunk with a full frame of interleaved samples.
                for s in &mut self.tmp_chunk[..FRAME_SAMPLES] {
                    *s = self.in_ring.pop_front().unwrap();
                }
                // Feed into processor render stream.
                let _ = self.processor.process_render_frame(&mut self.tmp_chunk);
                // Store processed samples into the output ring.
                for &s in &self.tmp_chunk[..FRAME_SAMPLES] {
                    self.out_ring.push_back(s);
                }
            }
            // 3. Produce outputs for this audio block.
            //
            // We always need `num_frames * CHANNELS` samples. If we don't have
            // enough processed samples yet, we output silence for the missing part.
            for i in 0..num_frames {
                if self.out_ring.len() >= CHANNELS {
                    out_l[i] = self.out_ring.pop_front().unwrap();
                    out_r[i] = self.out_ring.pop_front().unwrap();
                } else {
                    // Not enough processed data yet -> output silence.
                    out_l[i] = 0.0;
                    out_r[i] = 0.0;
                }
            }
            ProcessStatus::OutputsModified
        }
        fn new_stream(&mut self, _stream_info: &StreamInfo, _ctx: &mut ProcStreamCtx) {
            // Reset buffers for new stream.
            self.in_ring.clear();
            self.out_ring.clear();
        }
    }
    /// Capture-side node: feeds mic audio into [`AecProcessor`]'s capture stream.
    #[derive(Diff, Patch, Debug, Clone, Copy, PartialEq)]
    pub struct AecCaptureNode {
        pub enabled: bool,
    }
    impl Default for AecCaptureNode {
        fn default() -> Self {
            Self { enabled: true }
        }
    }
    impl AudioNode for AecCaptureNode {
        type Configuration = AecProcessor;
        fn info(&self, _config: &Self::Configuration) -> AudioNodeInfo {
            AudioNodeInfo::new()
                .debug_name("webrtc_capture")
                .channel_config(ChannelConfig {
                    num_inputs: ChannelCount::STEREO,
                    num_outputs: ChannelCount::STEREO,
                })
        }
        fn construct_processor(
            &self,
            config: &Self::Configuration,
            _cx: ConstructProcessorContext,
        ) -> impl AudioNodeProcessor {
            CaptureProcessor {
                enabled: self.enabled,
                processor: config.clone(),
                in_ring: VecDeque::with_capacity(FRAME_SAMPLES * 4),
                out_ring: VecDeque::with_capacity(FRAME_SAMPLES * 4),
                tmp_chunk: vec![0.0; FRAME_SAMPLES],
            }
        }
    }
    /// Real-time processor for [`AecCaptureNode`]; mirrors [`RenderProcessor`]
    /// but runs frames through the AEC capture path.
    struct CaptureProcessor {
        enabled: bool,
        processor: AecProcessor,
        // Interleaved input samples to be fed into WebRTC in 10ms chunks.
        in_ring: VecDeque<f32>,
        // Interleaved processed samples coming back from WebRTC.
        out_ring: VecDeque<f32>,
        // Scratch buffer for one NUM_SAMPLES_PER_FRAME chunk (interleaved).
        tmp_chunk: Vec<f32>,
    }
    impl AudioNodeProcessor for CaptureProcessor {
        fn process(
            &mut self,
            info: &ProcInfo,
            buffers: ProcBuffers,
            events: &mut ProcEvents,
            _extra: &mut ProcExtra,
        ) -> ProcessStatus {
            for patch in events.drain_patches::<AecCaptureNode>() {
                match patch {
                    AecCaptureNodePatch::Enabled(enabled) => {
                        self.enabled = enabled;
                        if !self.enabled {
                            // Drop buffered state so re-enabling starts clean.
                            self.in_ring.clear();
                            self.out_ring.clear();
                        }
                    }
                }
            }
            let frames = info.frames;
            let num_frames = frames as usize;
            let in_l = &buffers.inputs[0][..num_frames];
            let in_r = &buffers.inputs[1][..num_frames];
            let (out_l, out_rest) = buffers.outputs.split_first_mut().unwrap();
            let out_l = &mut out_l[..num_frames];
            let out_r = &mut out_rest[0][..num_frames];
            if !self.enabled {
                // Bypass if disabled.
                out_l.copy_from_slice(in_l);
                out_r.copy_from_slice(in_r);
                return ProcessStatus::OutputsModified;
            }
            // 1. Push current block into the interleaved input ring buffer.
            for i in 0..num_frames {
                self.in_ring.push_back(in_l[i]);
                self.in_ring.push_back(in_r[i]);
            }
            // 2. While we have at least one full 10ms frame, process it.
            while self.in_ring.len() >= FRAME_SAMPLES {
                for s in &mut self.tmp_chunk[..FRAME_SAMPLES] {
                    *s = self.in_ring.pop_front().unwrap();
                }
                let _ = self.processor.process_capture_frame(&mut self.tmp_chunk);
                for &s in &self.tmp_chunk[..FRAME_SAMPLES] {
                    self.out_ring.push_back(s);
                }
            }
            // 3. Produce outputs for this audio block.
            //
            // If we don't have enough processed samples to cover the whole block,
            // we output silence for the missing frames.
            for i in 0..num_frames {
                if self.out_ring.len() >= CHANNELS {
                    out_l[i] = self.out_ring.pop_front().unwrap();
                    out_r[i] = self.out_ring.pop_front().unwrap();
                } else {
                    out_l[i] = 0.0;
                    out_r[i] = 0.0;
                }
            }
            ProcessStatus::OutputsModified
        }
        fn new_stream(&mut self, _stream_info: &StreamInfo, _ctx: &mut ProcStreamCtx) {
            // Reset state for new stream.
            self.in_ring.clear();
            self.out_ring.clear();
        }
    }
}

View file

@ -0,0 +1,265 @@
use std::time::Duration;
use anyhow::Result;
use image::RgbaImage;
use strum::{Display, EnumString, VariantNames};
/// Sample rate and channel count of a PCM audio stream.
#[derive(Copy, Clone, Debug)]
pub struct AudioFormat {
    // Samples per second per channel (e.g. 48_000).
    pub sample_rate: u32,
    // Number of interleaved channels (1 = mono, 2 = stereo).
    pub channel_count: u32,
}
impl AudioFormat {
    /// Internal helper: a format at the shared 48 kHz rate.
    fn at_48k(channel_count: u32) -> Self {
        Self {
            sample_rate: 48_000,
            channel_count,
        }
    }
    /// 48 kHz, one channel.
    pub fn mono_48k() -> Self {
        Self::at_48k(1)
    }
    /// 48 kHz, two channels.
    pub fn stereo_48k() -> Self {
        Self::at_48k(2)
    }
    /// Mirrors the rate and channel count from a hang catalog audio config.
    pub fn from_hang_config(config: &hang::catalog::AudioConfig) -> Self {
        Self {
            sample_rate: config.sample_rate,
            channel_count: config.channel_count,
        }
    }
}
/// Bundles the concrete audio and video decoder types of a media backend.
pub trait Decoders {
    type Audio: AudioDecoder;
    type Video: VideoDecoder;
}
/// Producer of interleaved f32 PCM samples (e.g. a microphone).
pub trait AudioSource: Send + 'static {
    /// Object-safe clone; each clone reads from the same source.
    fn cloned_boxed(&self) -> Box<dyn AudioSource>;
    fn format(&self) -> AudioFormat;
    /// Fills `buf` with samples; `Ok(None)` signals the source has ended.
    fn pop_samples(&mut self, buf: &mut [f32]) -> Result<Option<usize>>;
}
/// Consumer of interleaved f32 PCM samples (e.g. speakers).
pub trait AudioSink: AudioSinkHandle {
    fn format(&self) -> Result<AudioFormat>;
    /// Queues interleaved samples for playback.
    fn push_samples(&mut self, buf: &[f32]) -> Result<()>;
    /// A control-only handle that can be sent to other threads.
    fn handle(&self) -> Box<dyn AudioSinkHandle>;
}
/// Playback control surface shared by sinks and their detached handles.
pub trait AudioSinkHandle: Send + 'static {
    fn pause(&self);
    fn resume(&self);
    fn is_paused(&self) -> bool;
    fn toggle_pause(&self);
    /// Smoothed peak, normalized to 0..1.
    // TODO: document how smoothing and normalization are expected
    fn smoothed_peak_normalized(&self) -> Option<f32> {
        // Default: no metering available.
        None
    }
}
/// Constructible audio encoder; the encoding API lives in [`AudioEncoderInner`].
pub trait AudioEncoder: AudioEncoderInner {
    /// Creates an encoder for `format` tuned by `preset`.
    fn with_preset(format: AudioFormat, preset: AudioPreset) -> Result<Self>
    where
        Self: Sized;
}
/// Object-safe encoding API: push raw samples, pop encoded frames.
pub trait AudioEncoderInner: Send + 'static {
    fn name(&self) -> &str;
    /// Catalog config describing the encoded stream (codec, rate, channels).
    fn config(&self) -> hang::catalog::AudioConfig;
    fn push_samples(&mut self, samples: &[f32]) -> Result<()>;
    /// `Ok(None)` when no full packet is available yet.
    fn pop_packet(&mut self) -> Result<Option<hang::Frame>>;
}
/// Forwarding impl so a boxed encoder can be used wherever the inner
/// trait is expected; every method delegates to the boxed value.
impl AudioEncoderInner for Box<dyn AudioEncoder> {
    fn name(&self) -> &str {
        let inner: &dyn AudioEncoder = &**self;
        inner.name()
    }
    fn config(&self) -> hang::catalog::AudioConfig {
        let inner: &dyn AudioEncoder = &**self;
        inner.config()
    }
    fn push_samples(&mut self, samples: &[f32]) -> Result<()> {
        let inner: &mut dyn AudioEncoder = &mut **self;
        inner.push_samples(samples)
    }
    fn pop_packet(&mut self) -> Result<Option<hang::Frame>> {
        let inner: &mut dyn AudioEncoder = &mut **self;
        inner.pop_packet()
    }
}
/// Audio decoder: push encoded packets, pop interleaved f32 samples.
pub trait AudioDecoder: Send + 'static {
    /// Creates a decoder for the stream described by `config`, resampling to
    /// `target_format`.
    fn new(config: &hang::catalog::AudioConfig, target_format: AudioFormat) -> Result<Self>
    where
        Self: Sized;
    fn push_packet(&mut self, packet: hang::Frame) -> Result<()>;
    /// Borrows the next decoded chunk; `Ok(None)` when none is ready.
    fn pop_samples(&mut self) -> Result<Option<&[f32]>>;
}
/// Raw pixel layout of an uncompressed video frame.
///
/// `Rgba` is the default; the manual `impl Default` was replaced with the
/// equivalent `#[derive(Default)]` + `#[default]` variant attribute.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub enum PixelFormat {
    /// 8-bit-per-channel RGBA (the default).
    #[default]
    Rgba,
    /// 8-bit-per-channel BGRA.
    Bgra,
}
/// Pixel layout and size of a raw video frame.
#[derive(Clone, Debug)]
pub struct VideoFormat {
    pub pixel_format: PixelFormat,
    // [width, height] in pixels.
    pub dimensions: [u32; 2],
}
/// One raw video frame: format plus the packed pixel bytes.
#[derive(Clone, Debug)]
pub struct VideoFrame {
    pub format: VideoFormat,
    // Raw pixel data, laid out per `format` (cheap to clone via `Bytes`).
    pub raw: bytes::Bytes,
}
/// Producer of raw video frames (camera, screen capture, ...).
pub trait VideoSource: Send + 'static {
    fn name(&self) -> &str;
    fn format(&self) -> VideoFormat;
    /// Next frame; `Ok(None)` when the source has ended.
    fn pop_frame(&mut self) -> Result<Option<VideoFrame>>;
    fn start(&mut self) -> Result<()>;
    fn stop(&mut self) -> Result<()>;
}
/// Constructible video encoder; the encoding API lives in [`VideoEncoderInner`].
pub trait VideoEncoder: VideoEncoderInner {
    fn with_preset(preset: VideoPreset) -> Result<Self>
    where
        Self: Sized;
}
/// Object-safe encoding API: push raw frames, pop encoded packets.
pub trait VideoEncoderInner: Send + 'static {
    fn name(&self) -> &str;
    /// Catalog config describing the encoded stream.
    fn config(&self) -> hang::catalog::VideoConfig;
    fn push_frame(&mut self, frame: VideoFrame) -> Result<()>;
    /// `Ok(None)` when no full packet is available yet.
    fn pop_packet(&mut self) -> Result<Option<hang::Frame>>;
}
/// Forwarding impl so a boxed encoder can be used wherever the inner
/// trait is expected; every method delegates to the boxed value.
impl VideoEncoderInner for Box<dyn VideoEncoder> {
    fn name(&self) -> &str {
        let inner: &dyn VideoEncoder = &**self;
        inner.name()
    }
    fn config(&self) -> hang::catalog::VideoConfig {
        let inner: &dyn VideoEncoder = &**self;
        inner.config()
    }
    fn push_frame(&mut self, frame: VideoFrame) -> Result<()> {
        let inner: &mut dyn VideoEncoder = &mut **self;
        inner.push_frame(frame)
    }
    fn pop_packet(&mut self) -> Result<Option<hang::Frame>> {
        let inner: &mut dyn VideoEncoder = &mut **self;
        inner.pop_packet()
    }
}
/// Video decoder: push encoded packets, pop decoded RGBA frames.
pub trait VideoDecoder: Send + 'static {
    fn new(config: &hang::catalog::VideoConfig, playback_config: &DecodeConfig) -> Result<Self>
    where
        Self: Sized;
    fn name(&self) -> &str;
    /// Next decoded frame; `Ok(None)` when none is ready yet.
    fn pop_frame(&mut self) -> Result<Option<DecodedFrame>>;
    fn push_packet(&mut self, packet: hang::Frame) -> Result<()>;
    /// Hints the display size so decoders may scale output accordingly.
    fn set_viewport(&mut self, w: u32, h: u32);
}
/// A decoded video frame together with its presentation timestamp.
pub struct DecodedFrame {
    pub frame: image::Frame,
    // Presentation time relative to stream start.
    pub timestamp: Duration,
}
impl DecodedFrame {
    /// Borrows the decoded RGBA pixel buffer.
    pub fn img(&self) -> &RgbaImage {
        self.frame.buffer()
    }
}
/// Supported audio codecs (string form is lowercase, e.g. "opus").
#[derive(Debug, Clone, Copy, Display, EnumString, VariantNames)]
#[strum(serialize_all = "lowercase")]
pub enum AudioCodec {
    Opus,
}
/// Supported video codecs (string form is lowercase, e.g. "h264").
#[derive(Debug, Clone, Copy, Display, EnumString, VariantNames)]
#[strum(serialize_all = "lowercase")]
pub enum VideoCodec {
    H264,
    Av1,
}
/// 16:9 video resolution presets; ordering follows resolution (P180 < P1080).
#[derive(Debug, Clone, Copy, Display, EnumString, VariantNames, Eq, PartialEq, Ord, PartialOrd)]
pub enum VideoPreset {
    #[strum(serialize = "180p")]
    P180,
    #[strum(serialize = "360p")]
    P360,
    #[strum(serialize = "720p")]
    P720,
    #[strum(serialize = "1080p")]
    P1080,
}
impl VideoPreset {
    /// Every preset, ordered from lowest to highest resolution.
    pub fn all() -> [VideoPreset; 4] {
        [Self::P180, Self::P360, Self::P720, Self::P1080]
    }
    /// `(width, height)` in pixels for this preset (all 16:9).
    pub fn dimensions(&self) -> (u32, u32) {
        match self {
            Self::P180 => (320, 180),
            Self::P360 => (640, 360),
            Self::P720 => (1280, 720),
            Self::P1080 => (1920, 1080),
        }
    }
    /// Width in pixels.
    pub fn width(&self) -> u32 {
        let (width, _height) = self.dimensions();
        width
    }
    /// Height in pixels.
    pub fn height(&self) -> u32 {
        let (_width, height) = self.dimensions();
        height
    }
    /// Frame rate; a fixed 30 fps for every preset.
    pub fn fps(&self) -> u32 {
        30
    }
}
/// Audio quality presets: high quality or low quality.
#[derive(Debug, Clone, Copy, Display, EnumString, VariantNames, Eq, PartialEq)]
#[strum(serialize_all = "lowercase")]
pub enum AudioPreset {
    Hq,
    Lq,
}
/// Playback quality selection; defaults to `High`.
#[derive(Debug, Clone, Copy, Display, EnumString, VariantNames, Eq, PartialEq, Default)]
#[strum(serialize_all = "lowercase")]
pub enum Quality {
    Highest,
    #[default]
    High,
    Mid,
    Low,
}
/// Options handed to video decoders at construction time.
#[derive(Clone, Default)]
pub struct DecodeConfig {
    // Desired pixel layout of decoded frames (default: RGBA).
    pub pixel_format: PixelFormat,
}
/// Per-playback configuration: decode options plus quality selection.
#[derive(Clone, Default)]
pub struct PlaybackConfig {
    pub decode_config: DecodeConfig,
    pub quality: Quality,
}

View file

@ -0,0 +1,233 @@
use std::str::FromStr;
use anyhow::{Context, Result};
use nokhwa::{
nokhwa_initialize,
pixel_format::RgbFormat,
utils::{
CameraFormat, CameraIndex, FrameFormat, RequestedFormat, RequestedFormatType, Resolution,
},
};
use tracing::{debug, info, trace, warn};
use xcap::{Monitor, VideoRecorder};
use crate::{
av::{PixelFormat, VideoFormat, VideoFrame, VideoSource},
ffmpeg::util::MjpgDecoder,
};
/// Screen-capture video source backed by xcap's monitor recorder.
pub struct ScreenCapturer {
    // Kept alive so the recorder's monitor stays valid.
    pub(crate) _monitor: Monitor,
    // Monitor width/height in pixels, captured at construction.
    pub(crate) width: u32,
    pub(crate) height: u32,
    pub(crate) video_recorder: VideoRecorder,
    // Frames pushed by the recorder's background thread.
    pub(crate) rx: std::sync::mpsc::Receiver<xcap::Frame>,
}
// SAFETY: xcap's recorder types are not `Send` on all platforms;
// TODO(review): confirm this is sound — the capturer is only ever used from
// one thread at a time, but that invariant is not enforced here.
unsafe impl Send for ScreenCapturer {}
impl Drop for ScreenCapturer {
    // Best-effort stop of the background recorder; errors are ignored
    // because there is no way to report them from drop.
    fn drop(&mut self) {
        self.video_recorder.stop().ok();
    }
}
impl ScreenCapturer {
    /// Creates a capturer for the first monitor xcap reports.
    ///
    /// # Errors
    /// Fails when monitor enumeration fails, no monitor is present, or the
    /// recorder cannot be created.
    pub fn new() -> Result<Self> {
        info!("Initializing screen capturer (xcap)");
        let monitors = Monitor::all().context("Failed to get monitors")?;
        info!("Available monitors: {monitors:?}");
        // Idiom fix: take the first monitor via `next()` + context instead of
        // an `is_empty` check followed by `unwrap()` (removes the panic path).
        let monitor = monitors
            .into_iter()
            .next()
            .context("No monitors available")?;
        let width = monitor.width()?;
        let height = monitor.height()?;
        let name = monitor
            .name()
            .unwrap_or_else(|_| "Unknown Monitor".to_string());
        info!("Using monitor: {} ({}x{})", name, width, height);
        // The recorder delivers frames over `rx` from a background thread.
        let (video_recorder, rx) = monitor.video_recorder()?;
        Ok(Self {
            _monitor: monitor,
            video_recorder,
            rx,
            width,
            height,
        })
    }
}
impl VideoSource for ScreenCapturer {
    fn name(&self) -> &str {
        "screen"
    }
    fn format(&self) -> VideoFormat {
        VideoFormat {
            pixel_format: PixelFormat::Rgba,
            dimensions: [self.width, self.height],
        }
    }
    fn start(&mut self) -> Result<()> {
        self.video_recorder.start()?;
        Ok(())
    }
    fn stop(&mut self) -> Result<()> {
        self.video_recorder.stop()?;
        Ok(())
    }
    /// Returns the most recent captured frame, blocking only when no frame
    /// has arrived yet.
    fn pop_frame(&mut self) -> anyhow::Result<Option<VideoFrame>> {
        // Only the newest frame matters: drain the channel so backlog does
        // not accumulate, keeping the last buffered frame if any.
        let frame = match self.rx.try_iter().last() {
            Some(frame) => frame,
            // Nothing buffered: block until the recorder produces one.
            None => self
                .rx
                .recv()
                .context("Screen recorder did not produce new frame")?,
        };
        Ok(Some(VideoFrame {
            format: VideoFormat {
                pixel_format: PixelFormat::Rgba,
                dimensions: [frame.width, frame.height],
            },
            raw: frame.raw.into(),
        }))
    }
}
/// Camera video source backed by nokhwa.
pub struct CameraCapturer {
    pub(crate) camera: nokhwa::Camera,
    // Used only when `IROH_LIVE_MJPEG_FFMPEG` selects the ffmpeg MJPEG path.
    pub(crate) mjpg_decoder: MjpgDecoder,
    // Negotiated capture resolution in pixels.
    pub(crate) width: u32,
    pub(crate) height: u32,
}
impl CameraCapturer {
pub fn new() -> Result<Self> {
info!("Initializing camera capturer (nokhwa)");
nokhwa_initialize(|granted| {
debug!("User selected camera access: {}", granted);
});
let cameras = nokhwa::query(nokhwa::utils::ApiBackend::Auto)?;
if cameras.is_empty() {
return Err(anyhow::anyhow!("No cameras available"));
}
info!("Available cameras: {cameras:?}");
let camera_index = match std::env::var("IROH_LIVE_CAMERA").ok() {
None => {
// Order of cameras in nokhwa is reversed from usual order (primary camera is last).
let first_camera = cameras.last().unwrap();
info!(": {}", first_camera.human_name());
first_camera.index().clone()
}
Some(camera_name) => match u32::from_str(&camera_name).ok() {
Some(num) => CameraIndex::Index(num),
None => CameraIndex::String(camera_name),
},
};
let mut camera = nokhwa::Camera::new(
camera_index,
RequestedFormat::new::<RgbFormat>(RequestedFormatType::AbsoluteHighestResolution),
)?;
info!("Using camera: {}", camera.info().human_name());
let available_formats = camera.compatible_camera_formats()?;
debug!("Available formats: {available_formats:?}",);
if let Some(format) = Self::select_format(available_formats, Resolution::new(1920, 1080)) {
if let Err(err) = camera.set_camera_requset(RequestedFormat::new::<RgbFormat>(
RequestedFormatType::Exact(format),
)) {
warn!(?format, "Failed to change camera format: {err:#}");
}
}
info!("Using format: {}", camera.camera_format());
let resolution = camera.resolution();
Ok(Self {
camera,
mjpg_decoder: MjpgDecoder::new()?,
width: resolution.width(),
height: resolution.height(),
})
}
fn select_format(
mut formats: Vec<CameraFormat>,
desired_resolution: Resolution,
) -> Option<CameraFormat> {
formats.sort_by(|a, b| {
a.resolution()
.cmp(&b.resolution())
.then(a.frame_rate().cmp(&b.frame_rate()))
});
formats
.iter()
.find(|format| format.resolution() >= desired_resolution)
.or_else(|| formats.last())
.cloned()
}
}
impl VideoSource for CameraCapturer {
    fn name(&self) -> &str {
        "cam"
    }
    fn format(&self) -> VideoFormat {
        VideoFormat {
            pixel_format: PixelFormat::Rgba,
            dimensions: [self.width, self.height],
        }
    }
    fn start(&mut self) -> Result<()> {
        self.camera.open_stream()?;
        Ok(())
    }
    fn stop(&mut self) -> Result<()> {
        self.camera.stop_stream()?;
        Ok(())
    }
    /// Captures and decodes one camera frame.
    ///
    /// MJPEG frames are decoded via ffmpeg when `IROH_LIVE_MJPEG_FFMPEG` is
    /// set; everything else goes through nokhwa's RGBA decode.
    fn pop_frame(&mut self) -> anyhow::Result<Option<VideoFrame>> {
        let capture_started = std::time::Instant::now();
        let raw = self
            .camera
            .frame()
            .context("Failed to capture camera frame")?;
        trace!("pop frame: capture took {:?}", capture_started.elapsed());
        let decode_started = std::time::Instant::now();
        // Env var is only consulted for MJPEG frames (short-circuit).
        let use_ffmpeg = matches!(raw.source_frame_format(), FrameFormat::MJPEG)
            && std::env::var("IROH_LIVE_MJPEG_FFMPEG").is_ok();
        let decoded = if use_ffmpeg {
            trace!("decode ffmpeg");
            self.mjpg_decoder.decode_frame(raw.buffer())?
        } else {
            let image = raw
                .decode_image::<nokhwa::pixel_format::RgbAFormat>()
                .context("Failed to decode camera frame")?;
            VideoFrame {
                format: self.format(),
                raw: image.into_raw().into(),
            }
        };
        trace!("pop frame: decode took {:?}", decode_started.elapsed());
        Ok(Some(decoded))
    }
}

View file

@ -0,0 +1,93 @@
use anyhow::Result;
use ffmpeg_next::{self as ffmpeg, util::channel_layout::ChannelLayout};
use hang::catalog::AudioConfig;
use crate::{
av::{AudioDecoder, AudioFormat},
ffmpeg::ext::{CodecContextExt, PacketExt},
};
/// Opus audio decoder backed by ffmpeg, resampling to a target format.
pub struct FfmpegAudioDecoder {
    codec: ffmpeg::decoder::Audio,
    // Converts decoded frames to packed f32 at the target rate/layout.
    resampler: ffmpeg::software::resampling::Context,
    // Reused scratch frames to avoid per-packet allocation.
    decoded_frame: ffmpeg::util::frame::Audio,
    resampled_frame: ffmpeg::util::frame::Audio,
}
impl AudioDecoder for FfmpegAudioDecoder {
    /// Builds an Opus decoder for `config`, resampling output to
    /// `target_format` (packed f32).
    ///
    /// # Errors
    /// Fails for non-Opus codecs, unsupported target channel counts, or
    /// ffmpeg context/resampler setup failures.
    fn new(config: &AudioConfig, target_format: AudioFormat) -> Result<Self>
    where
        Self: Sized,
    {
        let codec = match config.codec {
            hang::catalog::AudioCodec::Opus => {
                let codec_id = ffmpeg::codec::Id::OPUS;
                let codec = ffmpeg::decoder::find(codec_id).unwrap();
                let mut ctx = ffmpeg::codec::Context::new_with_codec(codec)
                    .decoder()
                    .audio()?;
                // Opus extradata (if present) carries the channel mapping.
                if let Some(extradata) = &config.description {
                    ctx.set_extradata(&extradata)?;
                }
                ctx.set_channel_layout(if config.channel_count == 1 {
                    ChannelLayout::MONO
                } else {
                    ChannelLayout::STEREO
                });
                unsafe {
                    // SAFETY: `ctx` is exclusively borrowed and the pointer is
                    // valid for the write. ffmpeg-next seems to expose no safe
                    // setter for a decoder's sample rate — confirm against the
                    // pinned ffmpeg-next version.
                    let ctx_mut = ctx.as_mut_ptr();
                    (*ctx_mut).sample_rate = config.sample_rate as i32;
                }
                ctx
            }
            _ => anyhow::bail!(
                "Unsupported codec {} (only opus is supported)",
                config.codec
            ),
        };
        let target_channel_layout = match target_format.channel_count {
            1 => ChannelLayout::MONO,
            2 => ChannelLayout::STEREO,
            _ => anyhow::bail!("unsupported target channel count"),
        };
        // Packed (interleaved) f32, matching what `pop_samples` returns.
        let target_sample_format = ffmpeg_next::util::format::sample::Sample::F32(
            ffmpeg_next::util::format::sample::Type::Packed,
        );
        let resampler = ffmpeg::software::resampling::Context::get(
            codec.format(),
            codec.channel_layout(),
            codec.rate(),
            target_sample_format,
            target_channel_layout,
            target_format.sample_rate,
        )?;
        Ok(Self {
            codec,
            resampler,
            decoded_frame: ffmpeg::util::frame::Audio::empty(),
            resampled_frame: ffmpeg::util::frame::Audio::empty(),
        })
    }
    /// Sends one encoded packet to the decoder.
    fn push_packet(&mut self, packet: hang::Frame) -> Result<()> {
        let packet = packet.payload.to_ffmpeg_packet();
        self.codec.send_packet(&packet)?;
        Ok(())
    }
    /// Receives the next decoded frame, resampled to the target format.
    ///
    /// NOTE(review): ffmpeg reports "no frame ready yet" (EAGAIN) as an
    /// error, which is propagated as `Err` here rather than `Ok(None)` —
    /// confirm callers expect that.
    fn pop_samples(&mut self) -> Result<Option<&[f32]>> {
        match self.codec.receive_frame(&mut self.decoded_frame) {
            Err(err) => Err(err.into()),
            Ok(()) => {
                // Create an empty frame to hold the resampled audio data.
                self.resampler
                    .run(&self.decoded_frame, &mut self.resampled_frame)
                    .unwrap();
                let frame = &self.resampled_frame;
                // `data(0)` may be padded; cut to the exact sample payload
                // before reinterpreting the bytes as f32.
                let expected_bytes =
                    frame.samples() * frame.channels() as usize * core::mem::size_of::<f32>();
                Ok(Some(bytemuck::cast_slice(&frame.data(0)[..expected_bytes])))
            }
        }
    }
}

View file

@ -0,0 +1,153 @@
use anyhow::{Context, Result};
use ffmpeg_next::{self as ffmpeg, Rational};
use hang::{Timestamp, catalog::AudioConfig};
use tracing::trace;
use crate::{
av::{AudioEncoder, AudioEncoderInner, AudioFormat, AudioPreset},
ffmpeg::ext::CodecContextExt,
};
const SAMPLE_RATE: u32 = 48_000;
const BITRATE: u64 = 128_000; // 128 kbps
/// Opus audio encoder backed by ffmpeg, producing packets for `hang` tracks.
pub struct OpusEncoder {
    encoder: ffmpeg::encoder::Audio,
    // Total samples (per channel) pushed so far; doubles as the PTS counter
    // and the basis for outgoing packet timestamps.
    frame_count: u64,
    sample_rate: u32,
    // Target bitrate in bits per second.
    bitrate: u64,
    channel_count: u32,
    // Codec extradata captured at open time, advertised in the catalog config.
    extradata: Vec<u8>,
}
impl OpusEncoder {
    /// Convenience constructor: 48 kHz stereo at the default bitrate.
    pub fn stereo() -> Result<Self> {
        Self::new(SAMPLE_RATE, 2, BITRATE)
    }
    /// Convenience constructor: 48 kHz mono at the default bitrate.
    pub fn mono() -> Result<Self> {
        Self::new(SAMPLE_RATE, 1, BITRATE)
    }
    /// Opens an ffmpeg Opus encoder for interleaved f32 input.
    pub fn new(sample_rate: u32, channel_count: u32, bitrate: u64) -> Result<Self> {
        tracing::info!(
            "Initializing Opus encoder: {}Hz, {} channels",
            sample_rate,
            channel_count
        );
        ffmpeg::init()?;
        let codec =
            ffmpeg::encoder::find(ffmpeg::codec::Id::OPUS).context("Opus encoder not found")?;
        tracing::debug!("Found Opus codec: {:?}", codec.name());
        let mut audio_ctx = ffmpeg::codec::context::Context::new_with_codec(codec)
            .encoder()
            .audio()?;
        let rate = sample_rate as i32;
        let layout = if channel_count == 1 {
            ffmpeg::util::channel_layout::ChannelLayout::MONO
        } else {
            ffmpeg::util::channel_layout::ChannelLayout::STEREO
        };
        audio_ctx.set_rate(rate);
        audio_ctx.set_bit_rate(bitrate as usize);
        // Packed (interleaved) f32 matches the layout `push_samples` receives.
        audio_ctx.set_format(ffmpeg::format::Sample::F32(
            ffmpeg_next::format::sample::Type::Packed,
        ));
        // One tick per sample, so PTS values are plain sample counts.
        audio_ctx.set_time_base(Rational::new(1, rate));
        audio_ctx.set_channel_layout(layout);
        let encoder = audio_ctx.open()?;
        // Keep a copy of the codec extradata for the catalog description.
        let extradata = encoder.extradata().unwrap_or(&[]).to_vec();
        tracing::info!("Opus encoder initialized successfully");
        Ok(Self {
            encoder,
            frame_count: 0,
            sample_rate,
            channel_count,
            extradata,
            bitrate,
        })
    }
}
impl AudioEncoder for OpusEncoder {
    /// Builds an encoder at 48 kHz with a bitrate chosen by `preset`.
    fn with_preset(format: AudioFormat, preset: AudioPreset) -> Result<Self>
    where
        Self: Sized,
    {
        // High quality keeps the default 128 kbps; low quality drops to 32 kbps.
        let bitrate = match preset {
            AudioPreset::Lq => 32_000,
            AudioPreset::Hq => BITRATE,
        };
        Self::new(SAMPLE_RATE, format.channel_count, bitrate)
    }
}
impl AudioEncoderInner for OpusEncoder {
    /// Name of the underlying ffmpeg codec.
    fn name(&self) -> &str {
        self.encoder.id().name()
    }
    /// Catalog description advertised for this audio track.
    fn config(&self) -> AudioConfig {
        hang::catalog::AudioConfig {
            codec: hang::catalog::AudioCodec::Opus,
            sample_rate: self.sample_rate,
            channel_count: self.channel_count,
            bitrate: Some(self.bitrate),
            description: Some(self.extradata.clone().into()),
        }
    }
    /// Pushes one frame of interleaved f32 samples into the encoder.
    ///
    /// The slice is expected to hold exactly `frame_size * channel_count`
    /// samples (see the debug assertion below); empty input is a no-op.
    fn push_samples(&mut self, samples: &[f32]) -> Result<()> {
        if samples.is_empty() {
            return Ok(());
        }
        let samples_per_channel = samples.len() / self.channel_count as usize;
        // Opus consumes fixed-size frames; callers must chunk accordingly.
        debug_assert_eq!(samples_per_channel as u32, self.encoder.frame_size());
        let mut audio_frame = ffmpeg::util::frame::Audio::new(
            ffmpeg::util::format::sample::Sample::F32(ffmpeg::util::format::sample::Type::Packed),
            samples_per_channel,
            ffmpeg::util::channel_layout::ChannelLayout::default(self.channel_count as i32),
        );
        // Copy interleaved samples directly since we're using packed format
        let frame_data = audio_frame.data_mut(0);
        let frame_samples: &mut [f32] = bytemuck::cast_slice_mut(frame_data);
        let copy_len = samples.len().min(frame_samples.len());
        frame_samples[..copy_len].copy_from_slice(&samples[..copy_len]);
        // PTS is the running sample count (the time base is 1/sample_rate).
        audio_frame.set_pts(Some(self.frame_count as i64));
        self.frame_count += samples_per_channel as u64;
        trace!("push samples {}", audio_frame.samples());
        self.encoder.send_frame(&audio_frame)?;
        Ok(())
    }
    /// Drains one encoded Opus packet; `Ok(None)` when the encoder needs
    /// more input (EAGAIN) or has been fully flushed (EOF).
    fn pop_packet(&mut self) -> Result<Option<hang::Frame>> {
        let mut packet = ffmpeg::packet::Packet::empty();
        match self.encoder.receive_packet(&mut packet) {
            Ok(()) => {
                let payload = packet.data().unwrap_or(&[]).to_vec();
                let hang_frame = hang::Frame {
                    payload: payload.into(),
                    // NOTE(review): derived from samples pushed so far, not
                    // this packet's own PTS — with encoder latency these can
                    // diverge slightly; confirm this is intended.
                    timestamp: Timestamp::from_micros(
                        (self.frame_count * 1_000_000) / self.sample_rate as u64,
                    )?,
                    keyframe: true, // Audio frames are generally independent
                };
                trace!("poll frame {}", hang_frame.payload.num_bytes());
                Ok(Some(hang_frame))
            }
            Err(ffmpeg::Error::Eof) => Ok(None),
            Err(ffmpeg::Error::Other { errno }) if errno == ffmpeg::util::error::EAGAIN => Ok(None),
            Err(e) => Err(e.into()),
        }
    }
}

View file

@ -0,0 +1,108 @@
use crate::av::Decoders;
pub use self::{audio::*, ext::ffmpeg_log_init, video::*};
/// Marker type selecting the ffmpeg-backed audio/video decoders.
#[derive(Debug, Clone, Copy)]
pub struct FfmpegDecoders;
impl Decoders for FfmpegDecoders {
    type Audio = FfmpegAudioDecoder;
    type Video = FfmpegVideoDecoder;
}
// Audio encode/decode implementations (Opus).
mod audio {
    mod decoder;
    mod encoder;
    pub use decoder::*;
    pub use encoder::*;
}
// Video encode/decode implementations plus shared frame helpers.
pub mod video {
    mod decoder;
    mod encoder;
    pub(crate) mod util;
    pub use decoder::*;
    pub use encoder::*;
}
pub(crate) mod ext {
    use buf_list::BufList;
    use bytes::Buf;
    use ffmpeg_next as ffmpeg;
    /// Sets ffmpeg's global log level from the `FFMPEG_LOG` env var.
    ///
    /// Unknown or missing values fall back to `Warning`.
    pub fn ffmpeg_log_init() {
        use ffmpeg::util::log::Level::*;
        let level = if let Ok(val) = std::env::var("FFMPEG_LOG") {
            match val.as_str() {
                "quiet" => Quiet,
                "panic" => Panic,
                "fatal" => Fatal,
                "error" => Error,
                "warn" | "warning" => Warning,
                "info" => Info,
                "verbose" => Verbose,
                "debug" => Debug,
                "trace" => Trace,
                _ => Warning,
            }
        } else {
            Warning
        };
        ffmpeg::util::log::set_level(level);
    }
    /// Conversion of buffered payloads into ffmpeg packets.
    pub trait PacketExt {
        fn to_ffmpeg_packet(self) -> ffmpeg::Packet;
    }
    impl PacketExt for BufList {
        /// Copies the (possibly segmented) buffer list into a single
        /// contiguous ffmpeg packet.
        fn to_ffmpeg_packet(mut self) -> ffmpeg_next::Packet {
            let mut packet = ffmpeg::Packet::new(self.num_bytes());
            // `Packet::new` allocated exactly `num_bytes`, so `data_mut` is
            // present and the copy fills it completely.
            let dst = packet.data_mut().unwrap();
            self.copy_to_slice(dst);
            packet
        }
    }
    /// Access to `AVCodecContext::extradata`, which ffmpeg-next does not
    /// expose safely.
    pub trait CodecContextExt {
        fn extradata(&self) -> Option<&[u8]>;
        fn set_extradata(&mut self, extradata: &[u8]) -> Result<(), ffmpeg::Error>;
    }
    impl CodecContextExt for ffmpeg::codec::Context {
        /// Returns the codec extradata (e.g. avcC), if any.
        fn extradata(&self) -> Option<&[u8]> {
            // SAFETY: the context owns `extradata`; we only read it while
            // holding `&self`, and the null/size check guards the slice.
            unsafe {
                let ctx = self.as_ptr();
                if (*ctx).extradata.is_null() || (*ctx).extradata_size <= 0 {
                    return None;
                }
                Some(std::slice::from_raw_parts(
                    (*ctx).extradata as *const u8,
                    (*ctx).extradata_size as usize,
                ))
            }
        }
        /// Replaces the codec extradata with a copy of `extradata`.
        ///
        /// The buffer is allocated with ffmpeg's allocator (including the
        /// required zeroed input padding) so ffmpeg can free it when the
        /// context is destroyed.
        fn set_extradata(&mut self, extradata: &[u8]) -> Result<(), ffmpeg::Error> {
            // SAFETY: we hold the context mutably; the buffer is allocated via
            // av_mallocz (zeroed, padded) and ownership is handed to the
            // context, matching ffmpeg's documented extradata contract.
            unsafe {
                let ctx = self.as_mut_ptr();
                // Release any previously attached extradata so repeated calls
                // do not leak the old allocation (av_freep also nulls the
                // pointer), and keep the size consistent until re-set below.
                ffmpeg::ffi::av_freep(
                    (&mut (*ctx).extradata) as *mut *mut u8 as *mut core::ffi::c_void,
                );
                (*ctx).extradata_size = 0;
                // Allocate extradata + the zeroed padding ffmpeg requires.
                let pad = ffmpeg::ffi::AV_INPUT_BUFFER_PADDING_SIZE as usize;
                let size = extradata.len() + pad;
                (*ctx).extradata = ffmpeg::ffi::av_mallocz(size).cast::<u8>();
                if (*ctx).extradata.is_null() {
                    return Err(ffmpeg::Error::Bug);
                }
                // Copy the payload; av_mallocz already zeroed the padding.
                std::ptr::copy_nonoverlapping(
                    extradata.as_ptr(),
                    (*ctx).extradata,
                    extradata.len(),
                );
                (*ctx).extradata_size = extradata.len() as i32;
            }
            Ok(())
        }
    }
}

View file

@ -0,0 +1,139 @@
use anyhow::{Context, Result};
use ffmpeg_next::{
self as ffmpeg, codec, codec::Id as CodecId, util::frame::video::Video as FfmpegFrame,
};
use crate::{
av::{self, DecodeConfig, DecodedFrame, VideoDecoder},
ffmpeg::{
ext::{CodecContextExt, PacketExt},
video::util::{Rescaler, StreamClock},
},
};
/// ffmpeg-backed video decoder (H.264 / AV1) that rescales decoded frames to
/// the playback pixel format and viewport size.
pub struct FfmpegVideoDecoder {
    codec: ffmpeg::decoder::Video,
    rescaler: Rescaler,
    // Derives inter-frame delays from packet timestamps.
    clock: StreamClock,
    // Reused output buffer for `receive_frame` to avoid per-frame allocation.
    decoded: FfmpegFrame,
    // Pending viewport size (w, h); applied on the next decoded frame.
    viewport_changed: Option<(u32, u32)>,
    // Timestamp of the most recently pushed packet.
    last_timestamp: Option<hang::Timestamp>,
}
impl VideoDecoder for FfmpegVideoDecoder {
fn name(&self) -> &str {
self.codec.id().name()
}
fn new(config: &hang::catalog::VideoConfig, playback_config: &DecodeConfig) -> Result<Self>
where
Self: Sized,
{
ffmpeg::init()?;
// Build a decoder context for H.264 and attach extradata (e.g., avcC)
let codec = match &config.codec {
hang::catalog::VideoCodec::H264(_meta) => {
let codec =
codec::decoder::find(CodecId::H264).context("H.264 decoder not found")?;
let mut ctx = codec::context::Context::new_with_codec(codec);
if let Some(description) = &config.description {
ctx.set_extradata(&description)?;
}
ctx.decoder().video().unwrap()
}
hang::catalog::VideoCodec::AV1(_meta) => {
let codec = codec::decoder::find(CodecId::AV1).context("AV1 decoder not found")?;
let mut ctx = codec::context::Context::new_with_codec(codec);
if let Some(description) = &config.description {
ctx.set_extradata(&description)?;
}
ctx.decoder().video().unwrap()
}
_ => anyhow::bail!(
"Unsupported codec {} (only h264 and av1 are supported)",
config.codec
),
};
let rescaler = Rescaler::new(playback_config.pixel_format.to_ffmpeg(), None)?;
let clock = StreamClock::default();
let decoded = FfmpegFrame::empty();
Ok(Self {
codec,
rescaler,
clock,
decoded,
viewport_changed: None,
last_timestamp: None,
})
}
fn set_viewport(&mut self, w: u32, h: u32) {
self.viewport_changed = Some((w, h));
}
fn push_packet(&mut self, packet: hang::Frame) -> Result<()> {
let ffmpeg_packet = packet.payload.to_ffmpeg_packet();
self.codec.send_packet(&ffmpeg_packet)?;
self.last_timestamp = Some(packet.timestamp);
Ok(())
}
fn pop_frame(&mut self) -> Result<Option<av::DecodedFrame>> {
// Pull all available decoded frames
match self.codec.receive_frame(&mut self.decoded) {
Ok(()) => {
// Apply clamped target size.
if let Some((max_width, max_height)) = self.viewport_changed.take() {
let (width, height) =
calculate_resized_size(&self.decoded, max_width, max_height);
self.rescaler.set_target_dimensions(width, height);
}
let frame = self.rescaler.process(&mut self.decoded)?;
let last_timestamp = self
.last_timestamp
.as_ref()
.context("missing last packet")?;
let frame = DecodedFrame::from_ffmpeg(
frame,
self.clock.frame_delay(&last_timestamp),
std::time::Duration::from(*last_timestamp),
);
Ok(Some(frame))
}
Err(ffmpeg::util::error::Error::BufferTooSmall) => Ok(None),
Err(ffmpeg::Error::Other { errno }) if errno == ffmpeg::util::error::EAGAIN => Ok(None),
Err(err) => {
// tracing::warn!("decoder error: {err} {err:?} {err:#?}");
// Ok(None)
Err(err.into())
}
}
}
}
/// Computes an output size that fits `decoded` inside `max_width` x
/// `max_height`, preserving aspect ratio and never upscaling.
fn calculate_resized_size(decoded: &FfmpegFrame, max_width: u32, max_height: u32) -> (u32, u32) {
    // Guard against zero dimensions on either side of the ratio.
    let (src_w, src_h) = (decoded.width().max(1), decoded.height().max(1));
    let (bound_w, bound_h) = (max_width.max(1), max_height.max(1));
    // The limiting axis determines the scale; cap at 1.0 so we never upscale.
    let scale = (bound_w as f32 / src_w as f32)
        .min(bound_h as f32 / src_h as f32)
        .min(1.0)
        .max(0.0);
    let target_width = ((src_w as f32) * scale).floor().max(1.0) as u32;
    let target_height = ((src_h as f32) * scale).floor().max(1.0) as u32;
    tracing::debug!(
        src_w,
        src_h,
        max_w = bound_w,
        max_h = bound_h,
        target_width,
        target_height,
        "scale"
    );
    (target_width, target_height)
}

View file

@ -0,0 +1,568 @@
use std::{
ffi::{CString, c_int},
ptr,
task::Poll,
};
use anyhow::{Context, Result, anyhow};
use ffmpeg_next::{self as ffmpeg, codec, format::Pixel, frame::Video as VideoFrame};
use hang::Timestamp;
use tracing::{debug, info, trace};
use crate::{
av,
ffmpeg::{ext::CodecContextExt, util::Rescaler},
};
/// Hardware (or software-fallback) backend used for H.264 encoding.
#[derive(Debug, Clone, Copy, Default)]
// Allow unused because usage is cfg-gated on platform.
#[allow(unused)]
enum HwBackend {
    /// CPU encoding via libx264; always the final fallback.
    #[default]
    Software,
    /// Linux
    Vaapi,
    /// macOS
    Videotoolbox,
    /// Nvidia GPUs
    Nvenc,
    /// Intel GPUs
    Qsv,
    /// AMD GPUs
    Amf,
    // TODO:
    // Add DirectX (Windows)
    // Add MediaCodec (Android)
}
impl HwBackend {
    /// FFmpeg encoder name for this backend.
    fn codec_name(&self) -> &'static str {
        match self {
            Self::Software => "libx264",
            Self::Vaapi => "h264_vaapi",
            Self::Videotoolbox => "h264_videotoolbox",
            Self::Nvenc => "h264_nvenc",
            Self::Qsv => "h264_qsv",
            Self::Amf => "h264_amf",
        }
    }
    /// Backends to try, most-preferred first for the current platform.
    /// Software (libx264) is always appended as the final fallback.
    fn candidates() -> Vec<Self> {
        let mut list = Vec::new();
        #[cfg(target_os = "macos")]
        list.push(HwBackend::Videotoolbox);
        #[cfg(target_os = "windows")]
        list.extend([HwBackend::Nvenc, HwBackend::Qsv, HwBackend::Amf]);
        #[cfg(target_os = "linux")]
        list.extend([HwBackend::Vaapi, HwBackend::Nvenc, HwBackend::Qsv]);
        list.push(HwBackend::Software);
        list
    }
    /// Software pixel format this backend expects for input frames.
    fn pixel_format(&self) -> Pixel {
        match self {
            HwBackend::Vaapi | HwBackend::Qsv => Pixel::NV12,
            // The rest accept yuv420p SW frames.
            _ => Pixel::YUV420P,
        }
    }
    /// Pixel format configured on the codec context (a hardware surface
    /// format for VAAPI, a software format otherwise).
    fn hardware_pixel_format(&self) -> Pixel {
        match self {
            HwBackend::Vaapi => Pixel::VAAPI,
            HwBackend::Qsv => Pixel::NV12,
            // The rest accept yuv420p SW frames.
            _ => Pixel::YUV420P,
        }
    }
}
/// Encoder parameters shared by all backend attempts.
#[derive(Debug, Clone)]
struct EncoderOpts {
    width: u32,
    height: u32,
    // Frames per second; also used as the GOP size (one keyframe per second).
    framerate: u32,
    // Target bitrate in bits per second.
    bitrate: u64,
}
/// H.264 encoder that picks the best available backend (platform hardware
/// first, libx264 software fallback).
pub struct H264Encoder {
    encoder: ffmpeg::encoder::video::Encoder,
    // Converts incoming frames to the SW pixel format the backend expects.
    rescaler: Rescaler,
    backend: HwBackend,
    // VAAPI device/frames contexts; `Some` only when `backend` is Vaapi.
    vaapi: Option<VaapiState>,
    opts: EncoderOpts,
    // Number of frames pushed; used as PTS and for timestamp derivation.
    frame_count: u64,
}
impl H264Encoder {
    /// Opens the best available H.264 encoder for the given geometry, trying
    /// platform hardware backends first and falling back to libx264.
    ///
    /// # Errors
    /// Returns the last backend's error when none of the candidates could be
    /// opened.
    pub fn new(width: u32, height: u32, framerate: u32) -> Result<Self> {
        info!("Initializing H264 encoder: {width}x{height} @ {framerate}fps");
        ffmpeg::init()?;
        // Bitrate heuristic: ~0.07 bits per pixel per frame-factor, where the
        // factor grows at half rate above 30 fps.
        let pixels = width * height;
        let framerate_factor = 30.0 + (framerate as f32 - 30.) / 2.;
        let bitrate = (pixels as f32 * 0.07 * framerate_factor).round() as u64;
        let opts = EncoderOpts {
            width,
            height,
            framerate,
            bitrate,
        };
        let candidates = HwBackend::candidates();
        // Try each backend in preference order; keep the last failure for the
        // error message if all are unavailable.
        let mut last_err: Option<anyhow::Error> = None;
        for backend in candidates {
            match Self::open_encoder(backend, &opts) {
                Ok((encoder, rescaler, vaapi)) => {
                    info!(
                        "Using encoder backend: {} ({backend:?})",
                        backend.codec_name()
                    );
                    return Ok(Self {
                        encoder,
                        rescaler,
                        vaapi,
                        backend,
                        opts,
                        frame_count: 0,
                    });
                }
                Err(e) => {
                    debug!(
                        "Backend {backend:?} ({}) not available: {e:#}",
                        backend.codec_name()
                    );
                    last_err = Some(e);
                }
            }
        }
        Err(last_err.unwrap_or_else(|| anyhow!("no H.264 encoder available")))
    }
    /// Attempts to open one backend, returning the opened encoder, the input
    /// rescaler, and (for VAAPI) the hardware device/frame state.
    fn open_encoder(
        backend: HwBackend,
        opts: &EncoderOpts,
    ) -> Result<(
        ffmpeg::encoder::video::Encoder,
        Rescaler,
        Option<VaapiState>,
    )> {
        // Find encoder
        let codec = ffmpeg::codec::encoder::find_by_name(backend.codec_name())
            .with_context(|| format!("encoder {} not found", backend.codec_name()))?;
        debug!("Found encoder: {}", codec.name());
        // Build ctx
        let mut ctx = codec::context::Context::new_with_codec(codec);
        // SAFETY: `ctx` exclusively owns the AVCodecContext; these fields are
        // configuration values set before the encoder is opened (ffmpeg-next
        // exposes no safe setters for all of them).
        unsafe {
            let ctx_mut = ctx.as_mut_ptr();
            (*ctx_mut).width = opts.width as i32;
            (*ctx_mut).height = opts.height as i32;
            (*ctx_mut).time_base.num = 1;
            (*ctx_mut).time_base.den = opts.framerate as i32;
            (*ctx_mut).framerate.num = opts.framerate as i32;
            (*ctx_mut).framerate.den = 1;
            // One keyframe per second.
            (*ctx_mut).gop_size = opts.framerate as i32;
            (*ctx_mut).bit_rate = opts.bitrate as i64;
            // GLOBAL_HEADER moves SPS/PPS into extradata (needed for avcC).
            (*ctx_mut).flags = (*ctx_mut).flags | codec::Flags::GLOBAL_HEADER.bits() as c_int;
            (*ctx_mut).pix_fmt = backend.hardware_pixel_format().into();
        }
        // Backend-specific prep
        let vaapi_state = if matches!(backend, HwBackend::Vaapi) {
            // single-GPU default; make configurable if needed
            let va = VaapiState::new(opts.width, opts.height, "/dev/dri/renderD128")?;
            va.bind_to_context(&mut ctx);
            Some(va)
        } else {
            None
        };
        // Setup encoder options
        let enc_opts = {
            let mut opts = vec![
                // Disable annexB so that we get an avcC header in extradata
                // annexb=0 → MP4/ISO BMFF style (length-prefixed NAL units + avcC extradata),
                // as opposed to Annex B start codes (00 00 00 01).
                ("annexB", "0"),
            ];
            if matches!(backend, HwBackend::Software) {
                // libx264 low-latency settings; not applicable to HW encoders.
                opts.extend_from_slice(&[
                    ("preset", "ultrafast"),
                    ("tune", "zerolatency"),
                    ("profile", "baseline"),
                ]);
            }
            ffmpeg::Dictionary::from_iter(opts.into_iter())
        };
        // Open encoder
        let encoder = ctx.encoder().video()?.open_as_with(codec, enc_opts)?;
        // Build rescaler to SW input fmt expected per-backend
        let rescaler = Rescaler::new(backend.pixel_format(), Some((opts.width, opts.height)))?;
        Ok((encoder, rescaler, vaapi_state))
    }
    /// Catalog-level description of the encoded stream.
    pub fn video_config(&self) -> Result<hang::catalog::VideoConfig> {
        Ok(hang::catalog::VideoConfig {
            codec: hang::catalog::VideoCodec::H264(hang::catalog::H264 {
                profile: 0x42, // Baseline
                constraints: 0xE0,
                level: 0x1E, // Level 3.0
                inline: false, // TODO: is this correct?
            }),
            description: Some(self.avcc_description()?.to_vec().into()),
            coded_width: Some(self.opts.width),
            coded_height: Some(self.opts.height),
            display_ratio_width: None,
            display_ratio_height: None,
            bitrate: Some(self.opts.bitrate),
            framerate: Some(self.opts.framerate as f64),
            optimize_for_latency: Some(true),
        })
    }
    /// Returns the avcC configuration record from the encoder's extradata
    /// (populated thanks to GLOBAL_HEADER + annexB=0 set in `open_encoder`).
    pub fn avcc_description(&self) -> Result<&[u8]> {
        self.encoder.extradata().context("missing avcC extradata")
    }
    /// Drains one encoded packet.
    ///
    /// `Ready(Some)` carries a frame, `Ready(None)` signals end of stream,
    /// and `Pending` means the encoder needs more input (EAGAIN).
    pub fn receive_packet(&mut self) -> Result<Poll<Option<hang::Frame>>> {
        // Every match arm returns, so this loop body runs at most once.
        loop {
            let mut packet = ffmpeg::packet::Packet::empty();
            match self.encoder.receive_packet(&mut packet) {
                Ok(()) => {
                    let payload = packet.data().unwrap_or(&[]).to_vec();
                    let hang_frame = hang::Frame {
                        payload: payload.into(),
                        // NOTE(review): derived from frames pushed so far, not
                        // this packet's own PTS — with encoder latency these
                        // can diverge; confirm this is intended.
                        timestamp: Timestamp::from_micros(
                            self.frame_count * 1_000_000 / self.opts.framerate as u64,
                        )?,
                        keyframe: packet.is_key(),
                    };
                    return Ok(Poll::Ready(Some(hang_frame)));
                }
                Err(ffmpeg::Error::Eof) => return Ok(Poll::Ready(None)),
                Err(ffmpeg::Error::Other { errno }) if errno == ffmpeg::util::error::EAGAIN => {
                    return Ok(Poll::Pending);
                }
                Err(e) => return Err(e.into()),
            }
        }
    }
    /// Queues one frame for encoding, converting it to the backend's input
    /// format (and uploading it to a VAAPI surface when applicable).
    pub fn encode_frame(&mut self, mut frame: VideoFrame) -> Result<()> {
        frame.set_pts(Some(self.frame_count as i64));
        self.frame_count += 1;
        // Log roughly once per second of video.
        if self.frame_count % self.opts.framerate as u64 == 0 {
            tracing::trace!(
                "Encoding {}: {}x{} fmt={:?} pts={:?} backend={:?}",
                self.frame_count,
                frame.width(),
                frame.height(),
                frame.format(),
                frame.pts(),
                self.backend
            );
        }
        // Convert incoming frame to the SW format the backend expects.
        let frame = self
            .rescaler
            .process(&frame)
            .context("failed to color-convert frame")?
            .clone();
        let frame = match self.backend {
            HwBackend::Vaapi => {
                let va = self
                    .vaapi
                    .as_ref()
                    .ok_or_else(|| anyhow!("no vaapi state"))?;
                let hw_frame = va.transfer_nv12_to_hw(&frame)?;
                hw_frame
            }
            // Other backends accept SW frames directly
            _ => frame,
        };
        self.encoder
            .send_frame(&frame)
            .map_err(|e| anyhow!("send_frame failed: {e:?}"))?;
        Ok(())
    }
    /// Signals end of stream; remaining packets can still be drained with
    /// `receive_packet`.
    pub fn flush(&mut self) -> Result<()> {
        self.encoder.send_eof()?;
        Ok(())
    }
}
impl av::VideoEncoder for H264Encoder {
    /// Builds an encoder sized from the given preset.
    fn with_preset(preset: av::VideoPreset) -> Result<Self>
    where
        Self: Sized,
    {
        let (width, height, fps) = (preset.width(), preset.height(), preset.fps());
        Self::new(width, height, fps)
    }
}
impl av::VideoEncoderInner for H264Encoder {
    /// Name of the underlying ffmpeg codec.
    fn name(&self) -> &str {
        self.encoder.id().name()
    }
    /// Catalog description of the encoded stream.
    fn config(&self) -> hang::catalog::VideoConfig {
        self.video_config().expect("video_config available")
    }
    /// Queues one raw frame for encoding.
    fn push_frame(&mut self, frame: av::VideoFrame) -> anyhow::Result<()> {
        trace!(len = frame.raw.len(), format=?frame.format, "push frame");
        self.encode_frame(frame.to_ffmpeg())
    }
    /// Drains one encoded packet; `Pending` is surfaced as `None`.
    fn pop_packet(&mut self) -> anyhow::Result<Option<hang::Frame>> {
        Ok(match self.receive_packet()? {
            std::task::Poll::Ready(frame) => frame,
            std::task::Poll::Pending => None,
        })
    }
}
/// Owned ffmpeg buffer refs for the VAAPI device and its frame pool.
struct VaapiState {
    device_ctx: *mut ffmpeg::sys::AVBufferRef,
    frames_ctx: *mut ffmpeg::sys::AVBufferRef,
}
// SAFETY: presumably sound — both pointers are AVBufferRefs owned exclusively
// by this struct and only used from one thread at a time; ffmpeg buffer refs
// are refcounted. TODO(review): confirm VAAPI contexts carry no thread-affine
// state before relying on Send.
unsafe impl Send for VaapiState {}
impl VaapiState {
    /// Create VAAPI device + frames pool (NV12→VAAPI surfaces) for given size.
    fn new(width: u32, height: u32, device_path: &str) -> Result<Self> {
        // 1) Create VAAPI device
        let cpath = CString::new(device_path)?;
        let mut dev: *mut ffmpeg::sys::AVBufferRef = ptr::null_mut();
        // SAFETY: `cpath` outlives the call; ffmpeg either fills `dev` with a
        // new buffer ref or leaves it null on failure.
        let ret = unsafe {
            ffmpeg::sys::av_hwdevice_ctx_create(
                &mut dev,
                ffmpeg::sys::AVHWDeviceType::AV_HWDEVICE_TYPE_VAAPI,
                cpath.as_ptr(),
                ptr::null_mut(),
                0,
            )
        };
        if ret < 0 || dev.is_null() {
            // SAFETY: av_buffer_unref accepts a (possibly null) pointer slot.
            unsafe { ffmpeg::sys::av_buffer_unref(&mut dev) };
            return Err(anyhow!("vaapi device create failed: {ret}"));
        }
        // 2) Create frames pool for VAAPI with SW format NV12
        // SAFETY: `dev` is a valid device ref from the successful call above.
        let frames = unsafe { ffmpeg::sys::av_hwframe_ctx_alloc(dev) };
        if frames.is_null() {
            // SAFETY: releasing our only reference to the device on failure.
            unsafe { ffmpeg::sys::av_buffer_unref(&mut dev) };
            return Err(anyhow!("av_hwframe_ctx_alloc failed"));
        }
        // SAFETY: `frames` is non-null and its `data` points at an
        // AVHWFramesContext per the av_hwframe_ctx_alloc contract.
        let fc = unsafe { &mut *((*frames).data as *mut ffmpeg::sys::AVHWFramesContext) };
        fc.format = ffmpeg::sys::AVPixelFormat::AV_PIX_FMT_VAAPI;
        fc.sw_format = ffmpeg::sys::AVPixelFormat::AV_PIX_FMT_NV12;
        fc.width = width as i32;
        fc.height = height as i32;
        fc.initial_pool_size = 32;
        let ret = unsafe { ffmpeg::sys::av_hwframe_ctx_init(frames) };
        if ret < 0 {
            // SAFETY: both refs are owned here and not used after unref.
            unsafe {
                ffmpeg::sys::av_buffer_unref(&mut (frames as *mut _));
                ffmpeg::sys::av_buffer_unref(&mut dev);
            }
            return Err(anyhow!("av_hwframe_ctx_init failed: {ret}"));
        }
        Ok(Self {
            device_ctx: dev,
            frames_ctx: frames,
        })
    }
    /// Attach the frames context so the codec context expects VAAPI frames.
    fn bind_to_context(&self, ctx: &mut codec::context::Context) {
        // SAFETY: av_buffer_ref bumps the refcount, giving the codec context
        // its own reference to the frames pool.
        unsafe {
            let ctx = ctx.as_mut_ptr();
            (*ctx).hw_frames_ctx = ffmpeg::sys::av_buffer_ref(self.frames_ctx);
            (*ctx).pix_fmt = Pixel::VAAPI.into();
        }
    }
    /// Transfer a SW NV12 frame into a VAAPI HW frame, preserving PTS.
    /// Returns a new `VideoFrame` backed by a VAAPI surface.
    fn transfer_nv12_to_hw(&self, sw_frame: &VideoFrame) -> Result<VideoFrame> {
        // SAFETY: `hw` is freshly allocated from our frames pool, and the
        // transfer call writes into `hw` while only reading `sw_frame`.
        unsafe {
            // Allocate an empty HW frame from the pool
            let mut hw = ffmpeg::frame::Video::empty();
            let ret = ffmpeg::sys::av_hwframe_get_buffer(self.frames_ctx, hw.as_mut_ptr(), 0);
            if ret < 0 {
                return Err(anyhow!("av_hwframe_get_buffer failed: {ret}"));
            }
            // Keep PTS
            (*hw.as_mut_ptr()).pts = sw_frame.pts().unwrap_or(0);
            // Transfer SW NV12 → HW VAAPI surface
            let ret = ffmpeg::sys::av_hwframe_transfer_data(hw.as_mut_ptr(), sw_frame.as_ptr(), 0);
            if ret < 0 {
                return Err(anyhow!("av_hwframe_transfer_data failed: {ret}"));
            }
            Ok(hw)
        }
    }
}
impl Drop for VaapiState {
    fn drop(&mut self) {
        // SAFETY: both pointers were obtained from av_hwdevice_ctx_create /
        // av_hwframe_ctx_alloc and are owned exclusively by this struct.
        // Unref-ing via temporary copies leaves the fields dangling, which is
        // fine since the struct is being dropped.
        unsafe {
            if !self.frames_ctx.is_null() {
                ffmpeg::sys::av_buffer_unref(&mut (self.frames_ctx as *mut _));
            }
            if !self.device_ctx.is_null() {
                ffmpeg::sys::av_buffer_unref(&mut (self.device_ctx as *mut _));
            }
        }
    }
}
// pub struct Av1FfmpegEncoder {
// encoder: ffmpeg::encoder::video::Encoder,
// rescaler: Rescaler,
// opts: EncoderOpts,
// frame_count: u64,
// }
// impl Av1FfmpegEncoder {
// pub fn new(width: u32, height: u32, framerate: u32) -> Result<Self> {
// info!("Initializing AV1 (FFmpeg) encoder: {width}x{height} @ {framerate}fps");
// ffmpeg::init()?;
// let pixels = width * height;
// let framerate_factor = 30.0 + (framerate as f32 - 30.) / 2.;
// let bitrate = (pixels as f32 * 0.05 * framerate_factor).round() as u64;
// let opts = EncoderOpts {
// width,
// height,
// framerate,
// bitrate,
// };
// let codec = ffmpeg::encoder::find(ffmpeg::codec::Id::AV1).context("AV1 codec not found")?;
// let mut ctx = codec::context::Context::new_with_codec(codec);
// unsafe {
// let ctx_mut = ctx.as_mut_ptr();
// (*ctx_mut).width = width as i32;
// (*ctx_mut).height = height as i32;
// (*ctx_mut).time_base.num = 1;
// (*ctx_mut).time_base.den = framerate as i32;
// (*ctx_mut).framerate.num = framerate as i32;
// (*ctx_mut).framerate.den = 1;
// (*ctx_mut).gop_size = framerate as i32;
// (*ctx_mut).bit_rate = bitrate as i64;
// (*ctx_mut).pix_fmt = Pixel::YUV420P.into();
// }
// // libaom options for realtime
// let enc_opts =
// ffmpeg::Dictionary::from_iter([("cpu-used", "8"), ("row-mt", "1"), ("tiles", "2x2")]);
// let encoder = ctx.encoder().video()?.open_as_with(
// ffmpeg::encoder::find(ffmpeg::codec::Id::AV1).unwrap(),
// enc_opts,
// )?;
// let rescaler = Rescaler::new(Pixel::YUV420P, Some((width, height)))?;
// Ok(Self {
// encoder,
// rescaler,
// opts,
// frame_count: 0,
// })
// }
// }
// impl av::VideoEncoder for Av1FfmpegEncoder {
// fn with_preset(preset: av::VideoPreset) -> Result<Self>
// where
// Self: Sized,
// {
// Self::new(preset.width(), preset.height(), preset.fps())
// }
// fn config(&self) -> hang::catalog::VideoConfig {
// hang::catalog::VideoConfig {
// codec: hang::catalog::VideoCodec::AV1(Default::default()),
// description: None,
// coded_width: Some(self.opts.width),
// coded_height: Some(self.opts.height),
// display_ratio_width: None,
// display_ratio_height: None,
// bitrate: Some(self.opts.bitrate),
// framerate: Some(self.opts.framerate as f64),
// optimize_for_latency: Some(true),
// }
// }
// fn push_frame(
// &mut self,
// format: &av::VideoFormat,
// frame: av::VideoFrame,
// ) -> anyhow::Result<()> {
// use ffmpeg_next::frame::Video as FfFrame;
// let pixel = match format.pixel_format {
// av::PixelFormat::Rgba => Pixel::RGBA,
// av::PixelFormat::Bgra => Pixel::BGRA,
// };
// let [w, h] = format.dimensions;
// let mut ff = FfFrame::new(pixel, w, h);
// let stride = ff.stride(0) as usize;
// let row_bytes = (w as usize) * 4;
// for y in 0..(h as usize) {
// let dst_off = y * stride;
// let src_off = y * row_bytes;
// ff.data_mut(0)[dst_off..dst_off + row_bytes]
// .copy_from_slice(&frame.raw[src_off..src_off + row_bytes]);
// }
// let sw = self
// .rescaler
// .process(&ff)
// .context("failed to color-convert frame")?
// .clone();
// let mut enc_frame = sw;
// enc_frame.set_pts(Some(self.frame_count as i64));
// self.frame_count += 1;
// self.encoder.send_frame(&enc_frame)?;
// Ok(())
// }
// fn pop_packet(&mut self) -> anyhow::Result<Option<hang::Frame>> {
// let mut packet = ffmpeg::packet::Packet::empty();
// match self.encoder.receive_packet(&mut packet) {
// Ok(()) => {
// let payload = packet.data().unwrap_or(&[]).to_vec();
// let hang_frame = hang::Frame {
// payload: payload.into(),
// timestamp: std::time::Duration::from_nanos(
// self.frame_count.saturating_sub(1) * 1_000_000_000
// / self.opts.framerate as u64,
// ),
// keyframe: packet.is_key(),
// };
// Ok(Some(hang_frame))
// }
// Err(ffmpeg::Error::Eof) => Ok(None),
// Err(ffmpeg::Error::Other { errno }) if errno == ffmpeg::util::error::EAGAIN => Ok(None),
// Err(e) => Err(e.into()),
// }
// }
// }

View file

@ -0,0 +1,138 @@
use std::time::Duration;
use bytes::{BufMut, BytesMut};
use ffmpeg_next::util::{format::pixel::Pixel, frame::video::Video as FfmpegFrame};
use hang::Timestamp;
use image::{Delay, RgbaImage};
pub(crate) use self::mjpg_decoder::MjpgDecoder;
pub(crate) use self::rescaler::Rescaler;
use crate::av::{self, DecodedFrame, PixelFormat, VideoFormat, VideoFrame};
mod mjpg_decoder;
mod rescaler;
/// Tracks the previous frame timestamp to derive inter-frame delays.
#[derive(Default, Debug)]
pub(crate) struct StreamClock {
    pub(crate) last_timestamp: Option<hang::Timestamp>,
}
impl StreamClock {
    /// Returns the delay between `timestamp` and the previously recorded
    /// one, then records `timestamp`. The first call — and any backwards
    /// timestamp jump — yields a zero delay.
    pub(crate) fn frame_delay(&mut self, timestamp: &hang::Timestamp) -> Duration {
        let previous = self.last_timestamp.replace(*timestamp);
        match previous {
            None => Duration::ZERO,
            Some(last) => timestamp
                .checked_sub(last)
                .unwrap_or(Timestamp::ZERO)
                .into(),
        }
    }
}
impl av::VideoFrame {
    /// Copies this raw RGBA/BGRA frame into an ffmpeg video frame, honoring
    /// ffmpeg's per-row stride (which may include padding).
    pub fn to_ffmpeg(&self) -> FfmpegFrame {
        let pixel = match self.format.pixel_format {
            av::PixelFormat::Rgba => Pixel::RGBA,
            av::PixelFormat::Bgra => Pixel::BGRA,
        };
        let [width, height] = self.format.dimensions;
        let mut frame = FfmpegFrame::new(pixel, width, height);
        let stride = frame.stride(0) as usize;
        let row_bytes = width as usize * 4;
        let dst = frame.data_mut(0);
        // Source rows are tightly packed; destination rows sit `stride`
        // bytes apart, so copy row by row.
        for (row, src_row) in self.raw.chunks(row_bytes).take(height as usize).enumerate() {
            let dst_off = row * stride;
            dst[dst_off..dst_off + row_bytes].copy_from_slice(src_row);
        }
        frame
    }
}
impl av::DecodedFrame {
    /// Wraps an ffmpeg frame (expected to already be in a 4-byte
    /// RGBA-compatible layout) as a decoded frame, attaching the inter-frame
    /// delay and presentation timestamp.
    pub fn from_ffmpeg(frame: &FfmpegFrame, delay: Duration, timestamp: Duration) -> Self {
        let buffer = ffmpeg_frame_to_image(frame);
        let frame_delay = Delay::from_saturating_duration(delay);
        Self {
            frame: image::Frame::from_parts(buffer, 0, 0, frame_delay),
            timestamp,
        }
    }
}
/// Copies an ffmpeg frame into an [`image::RgbaImage`] without any color
/// conversion — the caller must ensure the frame is already in a 4-byte
/// RGBA-compatible layout.
///
/// This allocates the full frame into a fresh buffer, which we need anyway
/// to cross the thread boundary.
pub(crate) fn ffmpeg_frame_to_image(frame: &ffmpeg_next::util::frame::Video) -> image::RgbaImage {
    let width = frame.width();
    let height = frame.height();
    let stride = frame.stride(0) as usize;
    let row_bytes = width as usize * 4; // 4 bytes per pixel
    let plane = frame.data(0);
    // ffmpeg rows may carry end-of-line padding (stride >= row_bytes), so
    // copy the pixel bytes of each row individually.
    let mut pixels = Vec::with_capacity(row_bytes * height as usize);
    for row in 0..height as usize {
        let start = row * stride;
        pixels.extend_from_slice(&plane[start..start + row_bytes]);
    }
    RgbaImage::from_raw(width, height, pixels).expect("valid image buffer")
}
impl PixelFormat {
    /// Maps this format onto the equivalent ffmpeg pixel format.
    pub fn to_ffmpeg(&self) -> Pixel {
        match self {
            Self::Rgba => Pixel::RGBA,
            Self::Bgra => Pixel::BGRA,
        }
    }
    /// Maps an ffmpeg pixel format back, if it is one we support.
    pub fn from_ffmpeg(value: Pixel) -> Option<Self> {
        match value {
            Pixel::RGBA => Some(Self::Rgba),
            Pixel::BGRA => Some(Self::Bgra),
            _ => None,
        }
    }
}
/// Copies an ffmpeg frame into a [`VideoFrame`].
///
/// Returns `None` when the frame's pixel format is not one of the supported
/// 4-byte formats (RGBA/BGRA).
///
/// This allocates the full frame into a fresh buffer, which we need anyway
/// to cross the thread boundary.
pub(crate) fn ffmpeg_frame_to_video_frame(
    frame: &ffmpeg_next::util::frame::Video,
) -> Option<VideoFrame> {
    let pixel_format = PixelFormat::from_ffmpeg(frame.format())?;
    let width = frame.width();
    let height = frame.height();
    let stride = frame.stride(0) as usize;
    let row_bytes = width as usize * 4; // RGBA/BGRA: 4 bytes per pixel
    let plane = frame.data(0);
    // Rows may carry stride padding; copy only each row's pixel bytes.
    let mut raw = BytesMut::with_capacity(row_bytes * height as usize);
    for row in 0..height as usize {
        let start = row * stride;
        raw.put(&plane[start..start + row_bytes]);
    }
    let format = VideoFormat {
        dimensions: [width, height],
        pixel_format,
    };
    Some(VideoFrame {
        format,
        raw: raw.freeze(),
    })
}

View file

@ -0,0 +1,74 @@
use std::time::Instant;
use ffmpeg_next::{
self as ffmpeg, Error, Packet, codec::Id, format::Pixel, frame::Video as FfmpegVideoFrame,
};
use tracing::trace;
use crate::{
av::VideoFrame,
ffmpeg::util::{Rescaler, ffmpeg_frame_to_video_frame},
};
/// MJPEG/JPEG decoder that outputs RGBA [`VideoFrame`]s.
pub struct MjpgDecoder {
    dec: ffmpeg::decoder::Video,
    // Converts decoded output to RGBA at the source resolution.
    rescaler: Rescaler,
}
impl MjpgDecoder {
/// Initialize FFmpeg and create a Video decoder for MJPEG.
pub fn new() -> anyhow::Result<Self> {
ffmpeg::init()?;
// Find the MJPEG decoder and create a context bound to it.
let mjpeg = ffmpeg::decoder::find(Id::MJPEG).ok_or(Error::DecoderNotFound)?;
// Create a codec::Context that's pre-bound to this decoder codec,
// then get a video decoder out of it.
let ctx = ffmpeg::codec::context::Context::new_with_codec(mjpeg);
let dec = ctx.decoder().video()?; // has send_packet/receive_frame
let rescaler = Rescaler::new(Pixel::RGBA, None)?;
Ok(Self { dec, rescaler })
}
/// Decode one complete MJPEG/JPEG frame from `mjpg_frame`.
pub fn decode_frame(&mut self, mjpg_frame: &[u8]) -> Result<VideoFrame, Error> {
let now = Instant::now();
// Make a packet that borrows/copies the data.
let packet = Packet::borrow(mjpg_frame);
// Feed & drain once — MJPEG is intra-only (one picture per packet).
self.dec.send_packet(&packet)?;
trace!(t=?now.elapsed(), "decode ffmpeg: send packet");
let mut frame = FfmpegVideoFrame::empty();
self.dec.receive_frame(&mut frame)?;
trace!(t=?now.elapsed(), "decode ffmpeg: receive frame");
// MJPEG may output deprecated YUVJ* formats. Replace them with
// the non-deprecated equivalents and mark full range to keep semantics.
// This avoids ffmpeg warning: "deprecated pixel format used, make sure you did set range correctly".
use ffmpeg_next::util::color::Range;
match frame.format() {
Pixel::YUVJ420P => {
frame.set_color_range(Range::JPEG);
frame.set_format(Pixel::YUV420P);
}
Pixel::YUVJ422P => {
frame.set_color_range(Range::JPEG);
frame.set_format(Pixel::YUV422P);
}
Pixel::YUVJ444P => {
frame.set_color_range(Range::JPEG);
frame.set_format(Pixel::YUV444P);
}
_ => {}
}
trace!(t=?now.elapsed(), "decode ffmpeg: color");
let frame = self.rescaler.process(&frame)?;
trace!(t=?now.elapsed(), "decode ffmpeg: rescale");
let frame = ffmpeg_frame_to_video_frame(frame).expect("valid pixel format set in rescaler");
trace!(t=?now.elapsed(), "decode ffmpeg: convert");
Ok(frame)
}
}

View file

@ -0,0 +1,77 @@
use anyhow::Result;
use ffmpeg_next::software::scaling::Flags;
use ffmpeg_next::{
self as ffmpeg,
software::scaling::{self},
util::{format::pixel::Pixel, frame::video::Video as FfmpegFrame},
};
/// Lazily-initialized ffmpeg software scaler that converts frames to a fixed
/// target pixel format and (optionally) fixed target dimensions.
pub(crate) struct Rescaler {
    /// Pixel format every output frame is converted to.
    pub(crate) target_format: Pixel,
    /// Optional fixed output dimensions; if `None`, input dimensions are kept.
    pub(crate) target_width_height: Option<(u32, u32)>,
    /// Scaling context, created on first use and reconfigured via `cached()`.
    pub(crate) ctx: Option<scaling::Context>,
    /// Reusable output frame buffer, reallocated only when format/size change.
    pub(crate) out_frame: FfmpegFrame,
}
// SAFETY(review): presumably the wrapped ffmpeg structs are safe to move across
// threads (no thread-local state) — TODO confirm against ffmpeg-next docs.
// We want to create the encoder before moving it to a thread.
unsafe impl Send for Rescaler {}
impl Rescaler {
    /// Create a rescaler targeting `target_format` and, optionally, fixed output
    /// dimensions. The scaling context itself is created lazily on first use.
    pub fn new(target_format: Pixel, target_width_height: Option<(u32, u32)>) -> Result<Self> {
        Ok(Self {
            target_format,
            ctx: None,
            target_width_height,
            out_frame: FfmpegFrame::empty(),
        })
    }

    /// Set (or change) the fixed output dimensions for subsequent frames.
    pub fn set_target_dimensions(&mut self, w: u32, h: u32) {
        self.target_width_height = Some((w, h));
    }

    /// Convert `frame` to the target format/dimensions.
    ///
    /// Returns the input frame unchanged when no conversion is needed; otherwise
    /// returns a reference to the internal, reused output frame.
    pub fn process<'a: 'b, 'b>(
        &'a mut self,
        frame: &'b FfmpegFrame,
    ) -> Result<&'b FfmpegFrame, ffmpeg::Error> {
        // Short-circuit if possible.
        if self.target_width_height.is_none() && self.target_format == frame.format() {
            return Ok(frame);
        }
        let (target_width, target_height) = self
            .target_width_height
            .unwrap_or_else(|| (frame.width(), frame.height()));
        // Reallocate the output buffer only when its geometry or format changed.
        let out_frame_needs_reset = self.out_frame.width() != target_width
            || self.out_frame.height() != target_height
            || self.out_frame.format() != self.target_format;
        if out_frame_needs_reset {
            self.out_frame = FfmpegFrame::new(self.target_format, target_width, target_height);
        }
        let ctx = match self.ctx {
            None => self.ctx.insert(scaling::Context::get(
                frame.format(),
                frame.width(),
                frame.height(),
                self.out_frame.format(),
                self.out_frame.width(),
                self.out_frame.height(),
                Flags::BILINEAR,
            )?),
            Some(ref mut ctx) => ctx,
        };
        // This resets the context if any parameters changed since the last frame.
        ctx.cached(
            frame.format(),
            frame.width(),
            frame.height(),
            self.out_frame.format(),
            self.out_frame.width(),
            self.out_frame.height(),
            Flags::BILINEAR,
        );
        // `frame` is already a reference; no extra borrow needed (clippy: needless_borrow).
        ctx.run(frame, &mut self.out_frame)?;
        Ok(&self.out_frame)
    }
}

View file

@ -0,0 +1,9 @@
pub mod audio;
pub mod av;
pub mod capture;
pub mod ffmpeg;
pub mod publish;
pub mod subscribe;
mod util;
pub use audio::AudioBackend;

View file

@ -0,0 +1,594 @@
use std::{
collections::{BTreeMap, HashMap},
sync::{
Arc, Mutex,
atomic::{AtomicBool, AtomicU32, Ordering},
},
time::{Duration, Instant},
};
use anyhow::Context;
use hang::catalog::{AudioConfig, Catalog, CatalogProducer, VideoConfig};
use moq_lite::BroadcastProducer;
use n0_error::Result;
use n0_future::task::AbortOnDropHandle;
use tokio_util::sync::{CancellationToken, DropGuard};
use tracing::{debug, error, info, info_span, trace, warn};
use crate::{
av::{
AudioEncoder, AudioEncoderInner, AudioPreset, AudioSource, DecodeConfig, VideoEncoder,
VideoEncoderInner, VideoPreset, VideoSource,
},
subscribe::WatchTrack,
util::spawn_thread,
};
/// A broadcast producer that publishes a catalog of available renditions and
/// lazily starts encoder threads for tracks as subscribers request them.
pub struct PublishBroadcast {
    /// Underlying moq broadcast producer, shared with the request-serving task.
    producer: BroadcastProducer,
    /// Producer side of the catalog track describing available audio/video.
    catalog: CatalogProducer,
    /// Shared mutable state: available renditions and active encoder threads.
    state: Arc<Mutex<State>>,
    /// Background task serving track requests; aborted when the last clone drops.
    _task: Arc<AbortOnDropHandle<()>>,
}
impl PublishBroadcast {
    /// Create a broadcast with an empty catalog and spawn the background task
    /// that serves incoming track requests.
    pub fn new() -> Self {
        let mut producer = BroadcastProducer::default();
        let catalog = Catalog::default().produce();
        // Publish the catalog track itself so subscribers can discover renditions.
        producer.insert_track(catalog.consumer.track);
        let catalog = catalog.producer;
        let state = Arc::new(Mutex::new(State::default()));
        let task_handle = tokio::spawn(Self::run(state.clone(), producer.clone()));
        Self {
            producer,
            catalog,
            state,
            _task: Arc::new(AbortOnDropHandle::new(task_handle)),
        }
    }

    /// A clone of the underlying broadcast producer (e.g. to announce it elsewhere).
    pub fn producer(&self) -> BroadcastProducer {
        self.producer.clone()
    }

    /// Serve track requests: start an encoder when a known rendition is requested
    /// and stop it again once the track becomes unused.
    async fn run(state: Arc<Mutex<State>>, mut producer: BroadcastProducer) {
        while let Some(track) = producer.requested_track().await {
            let name = track.info.name.clone();
            if state
                .lock()
                .expect("poisoned")
                .start_track(track.clone())
                .inspect_err(|err| warn!(%name, "failed to start requested track: {err:#}"))
                .is_ok()
            {
                info!("started track: {name}");
                // Tear the encoder down again once the track has no consumers.
                tokio::spawn({
                    let state = state.clone();
                    async move {
                        track.unused().await;
                        info!("stopping track: {name}");
                        state.lock().expect("poisoned").stop_track(&name);
                    }
                });
            }
        }
    }

    /// Create a local WatchTrack from the current video source, if present.
    pub fn watch_local(&self, decode_config: DecodeConfig) -> Option<WatchTrack> {
        let (source, shutdown) = {
            let state = self.state.lock().expect("poisoned");
            // `?` here returns `None` from `watch_local` when no video is set.
            let source = state
                .available_video
                .as_ref()
                .map(|video| video.source.clone())?;
            Some((source, state.shutdown_token.child_token()))
        }?;
        Some(WatchTrack::from_video_source(
            "local".to_string(),
            shutdown,
            source,
            decode_config,
        ))
    }

    /// Publish (or replace) the set of video renditions in the catalog.
    /// Passing `None` clears the catalog entry and stops active video encoders.
    pub fn set_video(&mut self, renditions: Option<VideoRenditions>) -> Result<()> {
        match renditions {
            Some(renditions) => {
                let priority = 1u8;
                let configs = renditions.available_renditions()?;
                let video = hang::catalog::Video {
                    renditions: configs,
                    priority,
                    display: None,
                    rotation: None,
                    flip: None,
                };
                // Scope the catalog lock so it is released before taking the state lock.
                {
                    let mut catalog = self.catalog.lock();
                    catalog.video = Some(video);
                }
                self.state.lock().expect("poisoned").available_video = Some(renditions);
                // TODO: Drop active encodings if their rendition is no longer available?
            }
            None => {
                // Clear catalog and stop any active video encoders
                self.state.lock().expect("poisoned").remove_video();
                {
                    let mut catalog = self.catalog.lock();
                    catalog.video = None;
                }
            }
        }
        Ok(())
    }

    /// Publish (or replace) the set of audio renditions in the catalog.
    /// Passing `None` clears the catalog entry and stops active audio encoders.
    pub fn set_audio(&mut self, renditions: Option<AudioRenditions>) -> Result<()> {
        match renditions {
            Some(renditions) => {
                let priority = 2u8;
                let configs = renditions.available_renditions()?;
                let audio = hang::catalog::Audio {
                    renditions: configs,
                    priority,
                };
                {
                    let mut catalog = self.catalog.lock();
                    catalog.audio = Some(audio);
                }
                self.state.lock().expect("poisoned").available_audio = Some(renditions);
            }
            None => {
                // Clear catalog and stop any active audio encoders
                self.state.lock().expect("poisoned").remove_audio();
                {
                    let mut catalog = self.catalog.lock();
                    catalog.audio = None;
                }
            }
        }
        Ok(())
    }
}
impl Drop for PublishBroadcast {
    /// Cancel every encoder thread (via the shared shutdown token) and close the
    /// broadcast so subscribers observe the end of the stream.
    fn drop(&mut self) {
        self.state.lock().expect("poisoned").shutdown_token.cancel();
        self.producer.close();
    }
}
/// Shared mutable state behind [`PublishBroadcast`]'s mutex.
#[derive(Default)]
struct State {
    /// Root token; cancelled when the broadcast drops, stopping all encoders.
    shutdown_token: CancellationToken,
    /// Video renditions currently offered in the catalog, if any.
    available_video: Option<VideoRenditions>,
    /// Audio renditions currently offered in the catalog, if any.
    available_audio: Option<AudioRenditions>,
    /// Running video encoder threads, keyed by track name.
    active_video: HashMap<String, EncoderThread>,
    /// Running audio encoder threads, keyed by track name.
    active_audio: HashMap<String, EncoderThread>,
}
impl State {
    /// Stop and forget the encoder thread for `name`, whether video or audio.
    fn stop_track(&mut self, name: &str) {
        let found = match self.active_video.remove(name) {
            Some(thread) => Some(thread),
            None => self.active_audio.remove(name),
        };
        if let Some(thread) = found {
            thread.shutdown.cancel();
        }
    }

    /// Cancel every active audio encoder and clear the audio renditions.
    fn remove_audio(&mut self) {
        self.active_audio
            .drain()
            .for_each(|(_, thread)| thread.shutdown.cancel());
        self.available_audio = None;
    }

    /// Cancel every active video encoder and clear the video renditions.
    fn remove_video(&mut self) {
        self.active_video
            .drain()
            .for_each(|(_, thread)| thread.shutdown.cancel());
        self.available_video = None;
    }

    /// Start an encoder thread for the requested track if its rendition is
    /// available in either the video or the audio set.
    fn start_track(&mut self, track: moq_lite::TrackProducer) -> Result<()> {
        let name = track.info.name.clone();
        let track = hang::TrackProducer::new(track);
        let shutdown_token = self.shutdown_token.child_token();
        if let Some(video) = self.available_video.as_mut()
            && video.contains_rendition(&name)
        {
            let thread = video.start_encoder(&name, track, shutdown_token)?;
            self.active_video.insert(name, thread);
            return Ok(());
        }
        if let Some(audio) = self.available_audio.as_mut()
            && audio.contains_rendition(&name)
        {
            let thread = audio.start_encoder(&name, track, shutdown_token)?;
            self.active_audio.insert(name, thread);
            return Ok(());
        }
        info!("ignoring track request {name}: rendition not available");
        Err(n0_error::anyerr!("rendition not available"))
    }
}
/// A set of audio renditions backed by one [`AudioSource`], with a factory that
/// creates an encoder per preset on demand.
pub struct AudioRenditions {
    /// Creates a fresh encoder for a preset (captures the source's audio format).
    make_encoder: Box<dyn Fn(AudioPreset) -> Result<Box<dyn AudioEncoder>> + Send>,
    /// The audio source shared by all renditions.
    source: Box<dyn AudioSource>,
    /// Track name → preset mapping (names look like `audio-<preset>`).
    renditions: HashMap<String, AudioPreset>,
}
impl AudioRenditions {
pub fn new<E: AudioEncoder>(
source: impl AudioSource,
presets: impl IntoIterator<Item = AudioPreset>,
) -> Self {
let renditions = presets
.into_iter()
.map(|preset| (format!("audio-{preset}"), preset))
.collect();
let format = source.format();
Self {
make_encoder: Box::new(move |preset| Ok(Box::new(E::with_preset(format, preset)?))),
renditions,
source: Box::new(source),
}
}
pub fn available_renditions(&self) -> Result<BTreeMap<String, AudioConfig>> {
let mut renditions = BTreeMap::new();
for (name, preset) in self.renditions.iter() {
// We need to create the encoder to get the config, even though we drop it
// again (it will be created on deman). Not ideal, but works for now.
let config = (self.make_encoder)(*preset)?.config();
renditions.insert(name.clone(), config);
}
Ok(renditions)
}
pub fn encoder(&mut self, name: &str) -> Option<Result<Box<dyn AudioEncoder>>> {
let preset = self.renditions.get(name)?;
Some((self.make_encoder)(*preset))
}
pub fn contains_rendition(&self, name: &str) -> bool {
self.renditions.contains_key(name)
}
pub fn start_encoder(
&mut self,
name: &str,
producer: hang::TrackProducer,
shutdown_token: CancellationToken,
) -> Result<EncoderThread> {
let preset = self
.renditions
.get(name)
.context("rendition not available")?;
let encoder = (self.make_encoder)(*preset)?;
let thread = EncoderThread::spawn_audio(
self.source.cloned_boxed(),
encoder,
producer,
shutdown_token,
);
Ok(thread)
}
}
/// A set of video renditions backed by one shared capture thread, with a
/// factory that creates an encoder per preset on demand.
pub struct VideoRenditions {
    /// Creates a fresh encoder for a preset.
    make_encoder: Box<dyn Fn(VideoPreset) -> Result<Box<dyn VideoEncoder>> + Send>,
    /// Clonable fan-out wrapper around the underlying video source.
    source: SharedVideoSource,
    /// Track name → preset mapping (names look like `video-<preset>`).
    renditions: HashMap<String, VideoPreset>,
    /// Cancels the shared capture thread when this set is dropped.
    _shared_source_cancel_guard: DropGuard,
}
impl VideoRenditions {
pub fn new<E: VideoEncoder>(
source: impl VideoSource,
presets: impl IntoIterator<Item = VideoPreset>,
) -> Self {
let shutdown_token = CancellationToken::new();
let source = SharedVideoSource::new(source, shutdown_token.clone());
let renditions = presets
.into_iter()
.map(|preset| (format!("video-{preset}"), preset))
.collect();
Self {
make_encoder: Box::new(|preset| Ok(Box::new(E::with_preset(preset)?))),
renditions,
source,
_shared_source_cancel_guard: shutdown_token.drop_guard(),
}
}
pub fn available_renditions(&self) -> Result<BTreeMap<String, VideoConfig>> {
let mut renditions = BTreeMap::new();
for (name, preset) in self.renditions.iter() {
// We need to create the encoder to get the config, even though we drop it
// again (it will be created on deman). Not ideal, but works for now.
let config = (self.make_encoder)(*preset)?.config();
renditions.insert(name.clone(), config);
}
Ok(renditions)
}
pub fn contains_rendition(&self, name: &str) -> bool {
self.renditions.contains_key(name)
}
pub fn start_encoder(
&mut self,
name: &str,
producer: hang::TrackProducer,
shutdown_token: CancellationToken,
) -> Result<EncoderThread> {
let preset = self
.renditions
.get(name)
.context("rendition not available")?;
let encoder = (self.make_encoder)(*preset)?;
let thread =
EncoderThread::spawn_video(self.source.clone(), encoder, producer, shutdown_token);
Ok(thread)
}
}
/// Clonable fan-out wrapper around a [`VideoSource`]: a single capture thread
/// publishes the latest frame through a `watch` channel to all clones.
#[derive(Debug, Clone)]
pub(crate) struct SharedVideoSource {
    /// Name reported by the wrapped source.
    name: String,
    /// Receives the most recent captured frame (latest-value semantics).
    frames_rx: tokio::sync::watch::Receiver<Option<crate::av::VideoFrame>>,
    /// Format reported by the wrapped source at construction time.
    format: crate::av::VideoFormat,
    /// Whether the capture thread should actively pull frames.
    running: Arc<AtomicBool>,
    /// Capture thread handle; unparked when the first subscriber starts.
    thread: Arc<std::thread::JoinHandle<()>>,
    /// Number of active subscribers (clones that called `start`).
    subscriber_count: Arc<AtomicU32>,
}
impl SharedVideoSource {
    /// Wrap `source` in a background capture thread that fans frames out through
    /// a `watch` channel, starting/stopping the underlying source based on the
    /// subscriber count.
    fn new(mut source: impl VideoSource, shutdown: CancellationToken) -> Self {
        let name = source.name().to_string();
        let format = source.format();
        let (tx, rx) = tokio::sync::watch::channel(None);
        let running = Arc::new(AtomicBool::new(false));
        let thread = spawn_thread(format!("vshr-{}", source.name()), {
            let shutdown = shutdown.clone();
            let running = running.clone();
            move || {
                // Fixed 30 fps pacing. NOTE(review): presumably this should
                // follow the source's actual frame rate — confirm.
                let frame_time = Duration::from_secs_f32(1. / 30.);
                let start = Instant::now();
                for i in 0.. {
                    if shutdown.is_cancelled() {
                        break;
                    }
                    // While no subscriber is active: stop the source, park the
                    // thread, and restart the source after being unparked once
                    // `running` has been set again.
                    loop {
                        if running.load(Ordering::Relaxed) {
                            break;
                        }
                        if let Err(err) = source.stop() {
                            warn!("Failed to stop video source: {err:#}");
                        }
                        std::thread::park();
                        if let Err(err) = source.start() {
                            // Bug fix: this branch previously logged "Failed to stop".
                            warn!("Failed to start video source: {err:#}");
                        }
                    }
                    match source.pop_frame() {
                        Ok(Some(frame)) => {
                            // `watch` keeps only the latest value; slow readers skip frames.
                            let _ = tx.send(Some(frame));
                        }
                        Ok(None) => {}
                        Err(_) => break,
                    }
                    // Sleep until the next frame slot to keep a steady cadence.
                    let expected = frame_time * i;
                    let actual = start.elapsed();
                    if actual < expected {
                        std::thread::sleep(expected - actual);
                    }
                }
            }
        });
        Self {
            name,
            format,
            frames_rx: rx,
            thread: Arc::new(thread),
            running,
            subscriber_count: Default::default(),
        }
    }
}
impl VideoSource for SharedVideoSource {
    fn name(&self) -> &str {
        &self.name
    }
    fn format(&self) -> crate::av::VideoFormat {
        self.format.clone()
    }
    /// Register a subscriber; the first subscriber flags the capture thread as
    /// running and unparks it.
    fn start(&mut self) -> anyhow::Result<()> {
        let prev_count = self.subscriber_count.fetch_add(1, Ordering::Relaxed);
        if prev_count == 0 {
            self.running.store(true, Ordering::Relaxed);
            self.thread.thread().unpark();
        }
        Ok(())
    }
    /// Deregister a subscriber; the last one pauses the capture thread. The
    /// count saturates at zero, so extra `stop` calls are harmless.
    fn stop(&mut self) -> anyhow::Result<()> {
        if self
            .subscriber_count
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |val| {
                Some(val.saturating_sub(1))
            })
            // `fetch_update` only fails when the closure returns `None`.
            .expect("always returns Some")
            == 1
        {
            self.running.store(false, Ordering::Relaxed);
        }
        Ok(())
    }
    /// Return the newest frame published since the previous call, if any.
    fn pop_frame(&mut self) -> anyhow::Result<Option<crate::av::VideoFrame>> {
        let frame = self.frames_rx.borrow_and_update().clone();
        Ok(frame)
    }
}
/// Handle to a running encoder thread; cancelling `shutdown` (or dropping the
/// handle, which cancels it) stops the thread's loop.
pub struct EncoderThread {
    _thread_handle: std::thread::JoinHandle<()>,
    /// Cancelled to request the encoder loop to exit.
    shutdown: CancellationToken,
}
impl EncoderThread {
    /// Spawn a thread that pulls frames from `source` at the encoder's
    /// configured frame rate, encodes them, and writes the resulting packets to
    /// `producer` until `shutdown` is cancelled or the source/encoder fails.
    pub fn spawn_video(
        mut source: impl VideoSource,
        mut encoder: impl VideoEncoderInner,
        mut producer: hang::TrackProducer,
        shutdown: CancellationToken,
    ) -> Self {
        let thread_name = format!("venc-{:<4}-{:<4}", source.name(), encoder.name());
        let span = info_span!("videoenc", source = source.name(), encoder = encoder.name());
        let handle = spawn_thread(thread_name, {
            let shutdown = shutdown.clone();
            move || {
                let _guard = span.enter();
                if let Err(err) = source.start() {
                    warn!("video source failed to start: {err:#}");
                    return;
                }
                let format = source.format();
                tracing::debug!(
                    src_format = ?format,
                    dst_config = ?encoder.config(),
                    "video encoder thread start"
                );
                // Fall back to 30 fps when the encoder config has no frame rate.
                let framerate = encoder.config().framerate.unwrap_or(30.0);
                let interval = Duration::from_secs_f64(1. / framerate);
                loop {
                    let start = Instant::now();
                    if shutdown.is_cancelled() {
                        debug!("stop video encoder: cancelled");
                        break;
                    }
                    let frame = match source.pop_frame() {
                        Ok(frame) => frame,
                        Err(err) => {
                            warn!("video encoder failed: {err:#}");
                            break;
                        }
                    };
                    if let Some(frame) = frame {
                        if let Err(err) = encoder.push_frame(frame) {
                            warn!("video encoder failed: {err:#}");
                            break;
                        };
                        // Drain every packet the encoder produced for this frame.
                        while let Ok(Some(pkt)) = encoder.pop_packet() {
                            if let Err(err) = producer.write(pkt) {
                                warn!("failed to write frame to producer: {err:#}");
                            }
                        }
                    }
                    // Sleep out the remainder of this frame interval.
                    std::thread::sleep(interval.saturating_sub(start.elapsed()));
                }
                producer.inner.close();
                if let Err(err) = source.stop() {
                    warn!("video source failed to stop: {err:#}");
                }
                tracing::debug!("video encoder thread stop");
            }
        });
        Self {
            _thread_handle: handle,
            shutdown,
        }
    }

    /// Spawn a thread that pulls 20ms batches of samples from `source`, encodes
    /// them, and writes the resulting packets to `producer` until `shutdown` is
    /// cancelled or the source/encoder fails. Remaining packets are drained
    /// before the producer is closed.
    pub fn spawn_audio(
        mut source: Box<dyn AudioSource>,
        mut encoder: impl AudioEncoderInner,
        mut producer: hang::TrackProducer,
        shutdown: CancellationToken,
    ) -> Self {
        let sd = shutdown.clone();
        let name = encoder.name();
        let thread_name = format!("aenc-{:<4}", name);
        let span = info_span!("audioenc", %name);
        let handle = spawn_thread(thread_name, move || {
            let _guard = span.enter();
            tracing::debug!(config=?encoder.config(), "audio encoder thread start");
            let shutdown = sd;
            // 20ms framing to align with typical Opus config (48kHz → 960 samples/ch)
            const INTERVAL: Duration = Duration::from_millis(20);
            let format = source.format();
            let samples_per_frame = (format.sample_rate / 1000) * INTERVAL.as_millis() as u32;
            // Interleaved f32 buffer holding one 20ms frame across all channels.
            let mut buf = vec![0.0f32; samples_per_frame as usize * format.channel_count as usize];
            let start = Instant::now();
            for tick in 0.. {
                trace!("tick");
                if shutdown.is_cancelled() {
                    break;
                }
                match source.pop_samples(&mut buf) {
                    Ok(Some(_n)) => {
                        // Expect a full frame; if shorter, zero-pad via slice len
                        if let Err(err) = encoder.push_samples(&buf) {
                            error!(buf_len = buf.len(), "audio push_samples failed: {err:#}");
                            break;
                        }
                        while let Ok(Some(pkt)) = encoder
                            .pop_packet()
                            .inspect_err(|err| warn!("encoder error: {err:#}"))
                        {
                            if let Err(err) = producer.write(pkt) {
                                warn!("failed to write frame to producer: {err:#}");
                            }
                        }
                    }
                    Ok(None) => {
                        // keep pacing
                    }
                    Err(err) => {
                        error!("audio source failed: {err:#}");
                        break;
                    }
                }
                // Pace against absolute wall-clock targets so drift doesn't accumulate.
                let expected_time = (tick + 1) * INTERVAL;
                let actual_time = start.elapsed();
                if actual_time > expected_time {
                    warn!("audio thread too slow by {:?}", actual_time - expected_time);
                }
                let sleep = expected_time.saturating_sub(start.elapsed());
                if sleep > Duration::ZERO {
                    std::thread::sleep(sleep);
                }
            }
            // drain
            while let Ok(Some(pkt)) = encoder.pop_packet() {
                if let Err(err) = producer.write(pkt) {
                    warn!("failed to write frame to producer: {err:#}");
                }
            }
            producer.inner.close();
            tracing::debug!("audio encoder thread stop");
        });
        Self {
            _thread_handle: handle,
            shutdown,
        }
    }
}
impl Drop for EncoderThread {
    /// Request the encoder loop to stop; the thread exits on its next tick.
    fn drop(&mut self) {
        self.shutdown.cancel();
    }
}

View file

@ -0,0 +1,712 @@
use std::{collections::BTreeMap, sync::Arc, time::Duration};
use hang::{
Timestamp, TrackConsumer,
catalog::{AudioConfig, Catalog, CatalogConsumer, VideoConfig},
};
use moq_lite::{BroadcastConsumer, Track};
use n0_error::{Result, StackResultExt, StdResultExt};
use n0_future::task::AbortOnDropHandle;
use n0_watcher::{Watchable, Watcher};
use tokio::{
sync::mpsc::{self, error::TryRecvError},
time::Instant,
};
use tokio_util::sync::{CancellationToken, DropGuard};
use tracing::{Span, debug, error, info, info_span, trace, warn};
use crate::{
av::{
AudioDecoder, AudioSink, AudioSinkHandle, DecodeConfig, DecodedFrame, Decoders,
PlaybackConfig, Quality, VideoDecoder, VideoSource,
},
ffmpeg::util::Rescaler,
util::spawn_thread,
};
/// Maximum latency passed to [`TrackConsumer::new`] for subscribed audio/video tracks.
const DEFAULT_MAX_LATENCY: Duration = Duration::from_millis(150);
/// Consumer side of a remote broadcast: keeps a watchable snapshot of the
/// catalog and spawns decoder pipelines for selected audio/video renditions.
#[derive(derive_more::Debug, Clone)]
pub struct SubscribeBroadcast {
    /// Name of the broadcast we subscribed to.
    broadcast_name: String,
    #[debug("BroadcastConsumer")]
    broadcast: BroadcastConsumer,
    /// Latest catalog snapshot, updated by the background catalog task.
    catalog_watchable: Watchable<CatalogWrapper>,
    /// Parent token for per-track decoder pipelines; `shutdown()` cancels it.
    shutdown: CancellationToken,
    /// Task consuming catalog updates; aborted when the last clone drops.
    _catalog_task: Arc<AbortOnDropHandle<()>>,
}
/// Cheap-to-clone snapshot of a [`Catalog`] plus a sequence number.
///
/// Equality compares only `seq` (`inner` is `#[eq(skip)]`), so watchers observe
/// every catalog update as a change even if the contents compare equal.
#[derive(Debug, derive_more::PartialEq, derive_more::Eq, Default, Clone, derive_more::Deref)]
pub struct CatalogWrapper {
    #[eq(skip)]
    #[deref]
    inner: Arc<Catalog>,
    /// Monotonically increasing update counter.
    seq: usize,
}
impl CatalogWrapper {
    /// Wrap a catalog snapshot with its update sequence number.
    fn new(inner: Catalog, seq: usize) -> Self {
        Self {
            inner: Arc::new(inner),
            seq,
        }
    }

    /// All published video rendition names, sorted by ascending coded width.
    pub fn video_renditions(&self) -> impl Iterator<Item = &str> {
        let mut renditions: Vec<_> = self
            .inner
            .video
            .as_ref()
            .iter()
            // flat_map replaces the previous map+flatten (clippy: map_flatten).
            .flat_map(|v| v.renditions.iter())
            .map(|(name, config)| (name.as_str(), config.coded_width))
            .collect();
        renditions.sort_by(|a, b| a.1.cmp(&b.1));
        renditions.into_iter().map(|(name, _w)| name)
    }

    /// All published audio rendition names, in catalog order.
    pub fn audio_renditions(&self) -> impl Iterator<Item = &str> + '_ {
        self.inner
            .audio
            .as_ref()
            .into_iter()
            .flat_map(|v| v.renditions.iter())
            .map(|(name, _config)| name.as_str())
    }

    /// Pick the video rendition track name best matching `quality`.
    pub fn select_video_rendition(&self, quality: Quality) -> Result<String> {
        let video = self.inner.video.as_ref().context("no video published")?;
        let track_name =
            select_video_rendition(&video.renditions, quality).context("no video renditions")?;
        Ok(track_name)
    }

    /// Pick the audio rendition track name best matching `quality`.
    pub fn select_audio_rendition(&self, quality: Quality) -> Result<String> {
        // Bug fix: both error messages below previously said "video" (copy-paste).
        let audio = self.inner.audio.as_ref().context("no audio published")?;
        let track_name =
            select_audio_rendition(&audio.renditions, quality).context("no audio renditions")?;
        Ok(track_name)
    }
}
impl CatalogWrapper {
    /// Consume the wrapper and return the shared catalog.
    pub fn into_inner(self) -> Arc<Catalog> {
        self.inner
    }
}
impl SubscribeBroadcast {
    /// Subscribe to `broadcast`: waits for the initial catalog, then keeps a
    /// watchable catalog snapshot updated in a background task. The returned
    /// subscription's shutdown token is cancelled when the catalog track ends
    /// or the broadcast closes.
    pub async fn new(broadcast_name: String, broadcast: BroadcastConsumer) -> Result<Self> {
        let shutdown = CancellationToken::new();
        let (catalog_watchable, catalog_task) = {
            let track = broadcast.subscribe_track(&Catalog::default_track());
            let mut consumer = CatalogConsumer::new(track);
            let initial_catalog = consumer
                .next()
                .await
                .std_context("Broadcast closed before receiving catalog")?
                .context("Catalog track closed before receiving catalog")?;
            let watchable = Watchable::new(CatalogWrapper::new(initial_catalog, 0));
            let task = tokio::spawn({
                let shutdown = shutdown.clone();
                let watchable = watchable.clone();
                async move {
                    for seq in 1.. {
                        match consumer.next().await {
                            Ok(Some(catalog)) => {
                                watchable.set(CatalogWrapper::new(catalog, seq)).ok();
                            }
                            Ok(None) => {
                                debug!("subscribed broadcast catalog track ended");
                                break;
                            }
                            Err(err) => {
                                debug!("subscribed broadcast closed: {err:#}");
                                break;
                            }
                        }
                    }
                    shutdown.cancel();
                }
            });
            (watchable, task)
        };
        Ok(Self {
            broadcast_name,
            broadcast,
            catalog_watchable,
            _catalog_task: Arc::new(AbortOnDropHandle::new(catalog_task)),
            // Bug fix: previously this stored a fresh `CancellationToken::new()`,
            // disconnected from the token the catalog task cancels — so per-track
            // pipelines never observed broadcast shutdown. Store the real token.
            shutdown,
        })
    }

    /// Name of the subscribed broadcast.
    pub fn broadcast_name(&self) -> &str {
        &self.broadcast_name
    }

    /// A watcher that yields every catalog update.
    pub fn catalog_watcher(&mut self) -> n0_watcher::Direct<CatalogWrapper> {
        self.catalog_watchable.watch()
    }

    /// The current catalog snapshot.
    pub fn catalog(&self) -> CatalogWrapper {
        self.catalog_watchable.get()
    }

    /// Start both audio and video pipelines (see [`AvRemoteTrack::new`]).
    pub fn watch_and_listen<D: Decoders>(
        self,
        audio_out: impl AudioSink,
        playback_config: PlaybackConfig,
    ) -> Result<AvRemoteTrack> {
        AvRemoteTrack::new::<D>(self, audio_out, playback_config)
    }

    /// Watch the highest-quality video rendition with default decode settings.
    pub fn watch<D: VideoDecoder>(&self) -> Result<WatchTrack> {
        self.watch_with::<D>(&Default::default(), Quality::Highest)
    }

    /// Watch the video rendition best matching `quality`.
    pub fn watch_with<D: VideoDecoder>(
        &self,
        playback_config: &DecodeConfig,
        quality: Quality,
    ) -> Result<WatchTrack> {
        let track_name = self.catalog().select_video_rendition(quality)?;
        self.watch_rendition::<D>(playback_config, &track_name)
    }

    /// Watch a specific video rendition by track name.
    pub fn watch_rendition<D: VideoDecoder>(
        &self,
        playback_config: &DecodeConfig,
        track_name: &str,
    ) -> Result<WatchTrack> {
        let catalog = self.catalog();
        let video = catalog.video.as_ref().context("no video published")?;
        let config = video
            .renditions
            .get(track_name)
            .context("rendition not found")?;
        let consumer = TrackConsumer::new(
            self.broadcast.subscribe_track(&Track {
                name: track_name.to_string(),
                priority: video.priority,
            }),
            DEFAULT_MAX_LATENCY,
        );
        let span = info_span!("videodec", %track_name);
        WatchTrack::from_consumer::<D>(
            track_name.to_string(),
            consumer,
            // `config` is already a reference (clippy: needless_borrow).
            config,
            playback_config,
            self.shutdown.child_token(),
            span,
        )
    }

    /// Play the highest-quality audio rendition into `output`.
    pub fn listen<D: AudioDecoder>(&self, output: impl AudioSink) -> Result<AudioTrack> {
        self.listen_with::<D>(Quality::Highest, output)
    }

    /// Play the audio rendition best matching `quality` into `output`.
    pub fn listen_with<D: AudioDecoder>(
        &self,
        quality: Quality,
        output: impl AudioSink,
    ) -> Result<AudioTrack> {
        let track_name = self.catalog().select_audio_rendition(quality)?;
        self.listen_rendition::<D>(&track_name, output)
    }

    /// Play a specific audio rendition by track name into `output`.
    pub fn listen_rendition<D: AudioDecoder>(
        &self,
        name: &str,
        output: impl AudioSink,
    ) -> Result<AudioTrack> {
        let catalog = self.catalog();
        let audio = catalog.audio.as_ref().context("no audio published")?;
        let config = audio.renditions.get(name).context("rendition not found")?;
        let consumer = TrackConsumer::new(
            self.broadcast.subscribe_track(&Track {
                name: name.to_string(),
                priority: audio.priority,
            }),
            DEFAULT_MAX_LATENCY,
        );
        let span = info_span!("audiodec", %name);
        AudioTrack::spawn::<D>(
            name.to_string(),
            consumer,
            config.clone(),
            output,
            self.shutdown.child_token(),
            span,
        )
    }

    /// Resolves when the underlying broadcast closes.
    pub fn closed(&self) -> impl Future<Output = ()> + 'static {
        self.broadcast.closed()
    }

    /// Cancel all decoder pipelines spawned from this subscription.
    pub fn shutdown(&self) {
        self.shutdown.cancel();
    }
}
/// Return the first key from `order` that exists in `renditions`, falling back
/// to the map's first key (lexicographic for `BTreeMap`) when none match, and
/// `None` only when the map is empty.
fn select_rendition<T, P: ToString>(
    renditions: &BTreeMap<String, T>,
    order: &[P],
) -> Option<String> {
    for preferred in order {
        let key = preferred.to_string();
        if renditions.contains_key(key.as_str()) {
            return Some(key);
        }
    }
    renditions.keys().next().cloned()
}
/// Pick a video rendition by quality preference: each quality maps to an
/// ordered list of presets, tried in order against the published renditions.
// Removed the unused explicit lifetime parameter (clippy: needless_lifetimes).
fn select_video_rendition<T>(renditions: &BTreeMap<String, T>, q: Quality) -> Option<String> {
    use crate::av::VideoPreset::*;
    let order = match q {
        Quality::Highest => [P1080, P720, P360, P180],
        Quality::High => [P720, P360, P180, P1080],
        Quality::Mid => [P360, P180, P720, P1080],
        Quality::Low => [P180, P360, P720, P1080],
    };
    select_rendition(renditions, &order)
}
/// Pick an audio rendition by quality preference: high qualities prefer the
/// high-quality preset, low qualities the low-quality one.
// Removed the unused explicit lifetime parameter (clippy: needless_lifetimes).
fn select_audio_rendition<T>(renditions: &BTreeMap<String, T>, q: Quality) -> Option<String> {
    use crate::av::AudioPreset::*;
    let order = match q {
        Quality::Highest | Quality::High => [Hq, Lq],
        Quality::Mid | Quality::Low => [Lq, Hq],
    };
    select_rendition(renditions, &order)
}
/// A running audio playback pipeline: a tokio task feeds packets into a decoder
/// thread, which pushes decoded samples into an [`AudioSink`].
pub struct AudioTrack {
    /// Rendition (track) name.
    name: String,
    /// Handle to the audio sink.
    handle: Box<dyn AudioSinkHandle>,
    /// Cancelled when the pipeline stops or the track is dropped.
    shutdown_token: CancellationToken,
    _task_handle: AbortOnDropHandle<()>,
    _thread_handle: std::thread::JoinHandle<()>,
}
impl AudioTrack {
    /// Spawn the audio pipeline: a tokio task forwards packets from `consumer`
    /// into a dedicated decoder thread, which pushes decoded samples to `output`.
    pub(crate) fn spawn<D: AudioDecoder>(
        name: String,
        consumer: TrackConsumer,
        config: AudioConfig,
        output: impl AudioSink,
        shutdown: CancellationToken,
        span: Span,
    ) -> Result<Self> {
        let _guard = span.enter();
        let (packet_tx, packet_rx) = mpsc::channel(32);
        let output_format = output.format()?;
        info!(?config, "audio thread start");
        let decoder = D::new(&config, output_format)?;
        let handle = output.handle();
        let thread_name = format!("adec-{}", name);
        let thread = spawn_thread(thread_name, {
            let shutdown = shutdown.clone();
            let span = span.clone();
            move || {
                let _guard = span.enter();
                if let Err(err) = Self::run_loop(decoder, packet_rx, output, &shutdown) {
                    error!("audio decoder failed: {err:#}");
                }
                info!("audio decoder thread stop");
            }
        });
        let task = tokio::spawn(forward_frames(consumer, packet_tx));
        Ok(Self {
            name,
            handle,
            shutdown_token: shutdown,
            _task_handle: AbortOnDropHandle::new(task),
            _thread_handle: thread,
        })
    }

    /// Resolves once the pipeline's shutdown token is cancelled.
    pub fn stopped(&self) -> impl Future<Output = ()> + 'static {
        let shutdown_token = self.shutdown_token.clone();
        async move { shutdown_token.cancelled().await }
    }

    /// Name of the rendition being played.
    pub fn rendition(&self) -> &str {
        &self.name
    }

    /// Handle to the audio sink.
    pub fn handle(&self) -> &dyn AudioSinkHandle {
        self.handle.as_ref()
    }

    /// Decoder loop: every 10ms tick, drain all queued packets, decode them and
    /// push the resulting samples into the sink (skipped while the sink is paused).
    pub(crate) fn run_loop(
        mut decoder: impl AudioDecoder,
        mut packet_rx: mpsc::Receiver<hang::Frame>,
        mut sink: impl AudioSink,
        shutdown: &CancellationToken,
    ) -> Result<()> {
        const INTERVAL: Duration = Duration::from_millis(10);
        let mut remote_start = None;
        let loop_start = Instant::now();
        'main: for i in 0.. {
            let tick = Instant::now();
            if shutdown.is_cancelled() {
                debug!("stop audio thread: cancelled");
                break;
            }
            loop {
                match packet_rx.try_recv() {
                    Ok(packet) => {
                        // The first packet's timestamp becomes the remote time origin.
                        let remote_start = *remote_start.get_or_insert_with(|| packet.timestamp);
                        if tracing::enabled!(tracing::Level::TRACE) {
                            let loop_elapsed = tick.duration_since(loop_start);
                            let remote_elapsed: Duration = packet
                                .timestamp
                                .checked_sub(remote_start)
                                .unwrap_or(Timestamp::ZERO)
                                .into();
                            let diff_ms =
                                (loop_elapsed.as_secs_f32() - remote_elapsed.as_secs_f32()) * 1000.;
                            trace!(len = packet.payload.num_bytes(), ts=?packet.timestamp, ?loop_elapsed, ?remote_elapsed, ?diff_ms, "recv packet");
                        }
                        // TODO: Skip outdated packets?
                        if !sink.is_paused() {
                            decoder.push_packet(packet)?;
                            if let Some(samples) = decoder.pop_samples()? {
                                sink.push_samples(samples)?;
                            }
                        }
                    }
                    Err(TryRecvError::Disconnected) => {
                        debug!("stop audio thread: packet_rx disconnected");
                        break 'main;
                    }
                    Err(TryRecvError::Empty) => {
                        trace!("no packet to recv");
                        break;
                    }
                }
            }
            // Sleep until the next tick boundary relative to loop start so that
            // timing drift does not accumulate.
            let target_time = i * INTERVAL;
            let real_time = Instant::now().duration_since(loop_start);
            let sleep = target_time.saturating_sub(real_time);
            if !sleep.is_zero() {
                std::thread::sleep(sleep);
            }
        }
        shutdown.cancel();
        Ok(())
    }
}
impl Drop for AudioTrack {
    /// Stop the decoder loop; the forwarding task is aborted by its handle.
    fn drop(&mut self) {
        self.shutdown_token.cancel();
    }
}
/// A video track being watched: a stream of decoded frames plus a control handle.
pub struct WatchTrack {
    /// Receiving side yielding decoded frames.
    video_frames: WatchTrackFrames,
    /// Control side (viewport, rendition name, lifecycle guard).
    handle: WatchTrackHandle,
}
/// Control side of a [`WatchTrack`]: viewport updates plus the lifecycle guard.
pub struct WatchTrackHandle {
    /// Rendition (track) name.
    rendition: String,
    /// Target output dimensions watched by the decode/rescale pipeline.
    viewport: Watchable<(u32, u32)>,
    _guard: WatchTrackGuard,
}
impl WatchTrackHandle {
    /// Update the target viewport; the decode pipeline watching the value
    /// will rescale subsequent frames accordingly.
    pub fn set_viewport(&self, w: u32, h: u32) {
        let _ = self.viewport.set((w, h));
    }

    /// Name of the rendition this handle belongs to.
    pub fn rendition(&self) -> &str {
        self.rendition.as_str()
    }
}
/// Receiving side of a [`WatchTrack`]: yields decoded video frames.
pub struct WatchTrackFrames {
    /// Channel fed by the decode/rescale pipeline.
    rx: mpsc::Receiver<DecodedFrame>,
}
impl WatchTrackFrames {
    /// Drain every frame currently queued and return the newest one, if any.
    pub fn current_frame(&mut self) -> Option<DecodedFrame> {
        std::iter::from_fn(|| self.rx.try_recv().ok()).last()
    }

    /// Return the newest queued frame, or await the next one if the queue is empty.
    pub async fn next_frame(&mut self) -> Option<DecodedFrame> {
        match self.current_frame() {
            Some(frame) => Some(frame),
            None => self.rx.recv().await,
        }
    }
}
/// Keeps a watch pipeline alive: when dropped, cancels the pipeline's token
/// and aborts the forwarding task (the thread handle is detached).
struct WatchTrackGuard {
    _shutdown_token_guard: DropGuard,
    _task_handle: Option<AbortOnDropHandle<()>>,
    _thread_handle: Option<std::thread::JoinHandle<()>>,
}
impl WatchTrack {
    /// A placeholder track that never yields a frame; useful as an inert default.
    pub fn empty(rendition: impl ToString) -> Self {
        let (tx, rx) = mpsc::channel(1);
        // Keep the sender alive forever so the receiver never reports closure.
        let task = tokio::task::spawn(async move {
            std::future::pending::<()>().await;
            let _ = tx;
        });
        let guard = WatchTrackGuard {
            _shutdown_token_guard: CancellationToken::new().drop_guard(),
            _task_handle: Some(AbortOnDropHandle::new(task)),
            _thread_handle: None,
        };
        Self {
            video_frames: WatchTrackFrames { rx },
            handle: WatchTrackHandle {
                rendition: rendition.to_string(),
                viewport: Default::default(),
                _guard: guard,
            },
        }
    }

    /// Watch a local [`VideoSource`] directly (no network, no codec): a thread
    /// pulls frames at a fixed rate, rescales them to the current viewport, and
    /// sends them to the consumer channel.
    pub fn from_video_source(
        rendition: String,
        shutdown: CancellationToken,
        mut source: impl VideoSource,
        decode_config: DecodeConfig,
    ) -> Self {
        let viewport = Watchable::new((1u32, 1u32));
        let (frame_tx, frame_rx) = tokio::sync::mpsc::channel::<DecodedFrame>(2);
        let thread_name = format!("vpr-{:>4}-{:>4}", source.name(), rendition);
        let thread = spawn_thread(thread_name, {
            let mut viewport = viewport.watch();
            let shutdown = shutdown.clone();
            move || {
                // TODO: Make configurable.
                let fps = 30;
                let mut rescaler = Rescaler::new(decode_config.pixel_format.to_ffmpeg(), None)
                    .expect("failed to create rescaler");
                let frame_duration = Duration::from_secs_f32(1. / fps as f32);
                if let Err(err) = source.start() {
                    warn!("Video source failed to start: {err:?}");
                    return;
                }
                let start = Instant::now();
                for i in 1.. {
                    // let t = Instant::now();
                    if shutdown.is_cancelled() {
                        break;
                    }
                    // Follow viewport changes so the rescaler tracks the UI size.
                    if viewport.update() {
                        let (w, h) = viewport.peek();
                        rescaler.set_target_dimensions(*w, *h);
                    }
                    match source.pop_frame() {
                        Ok(Some(frame)) => {
                            // trace!(t=?t.elapsed(), "pop");
                            let frame = frame.to_ffmpeg();
                            let frame = rescaler.process(&frame).expect("rescaler failed");
                            let frame =
                                DecodedFrame::from_ffmpeg(frame, frame_duration, start.elapsed());
                            // trace!(t=?t.elapsed(), "convert");
                            let _ = frame_tx.blocking_send(frame);
                            // trace!(t=?t.elapsed(), "send");
                        }
                        Ok(None) => {}
                        Err(_) => break,
                    }
                    // Pace the loop to the target frame rate.
                    let expected_time = i * frame_duration;
                    let actual_time = start.elapsed();
                    if expected_time > actual_time {
                        std::thread::sleep(expected_time - actual_time);
                        // trace!(t=?t.elapsed(), slept=?(actual_time - expected_time), ?expected_time, ?actual_time, "done");
                    }
                }
                if let Err(err) = source.stop() {
                    warn!("Video source failed to stop: {err:?}");
                    return;
                }
            }
        });
        let guard = WatchTrackGuard {
            _shutdown_token_guard: shutdown.drop_guard(),
            _task_handle: None,
            _thread_handle: Some(thread),
        };
        WatchTrack {
            video_frames: WatchTrackFrames { rx: frame_rx },
            handle: WatchTrackHandle {
                rendition,
                viewport,
                _guard: guard,
            },
        }
    }

    /// Watch a remote track: a tokio task forwards network packets to a decoder
    /// thread, which emits decoded frames into the consumer channel.
    pub(crate) fn from_consumer<D: VideoDecoder>(
        rendition: String,
        consumer: TrackConsumer,
        config: &VideoConfig,
        playback_config: &DecodeConfig,
        shutdown: CancellationToken,
        span: Span,
    ) -> Result<Self> {
        let (packet_tx, packet_rx) = mpsc::channel(32);
        let (frame_tx, frame_rx) = mpsc::channel(32);
        let viewport = Watchable::new((1u32, 1u32));
        let viewport_watcher = viewport.watch();
        let _guard = span.enter();
        debug!(?config, "video decoder start");
        let decoder = D::new(config, playback_config)?;
        let thread_name = format!("vdec-{}", rendition);
        let thread = spawn_thread(thread_name, {
            let shutdown = shutdown.clone();
            let span = span.clone();
            move || {
                let _guard = span.enter();
                if let Err(err) =
                    Self::run_loop(&shutdown, packet_rx, frame_tx, viewport_watcher, decoder)
                {
                    error!("video decoder failed: {err:#}");
                }
                shutdown.cancel();
            }
        });
        let task = tokio::task::spawn(forward_frames(consumer, packet_tx));
        let guard = WatchTrackGuard {
            _shutdown_token_guard: shutdown.drop_guard(),
            _task_handle: Some(AbortOnDropHandle::new(task)),
            _thread_handle: Some(thread),
        };
        Ok(WatchTrack {
            video_frames: WatchTrackFrames { rx: frame_rx },
            handle: WatchTrackHandle {
                rendition,
                viewport,
                _guard: guard,
            },
        })
    }

    /// Split into the frame stream and the control handle.
    pub fn split(self) -> (WatchTrackFrames, WatchTrackHandle) {
        (self.video_frames, self.handle)
    }

    /// Update the target viewport dimensions.
    pub fn set_viewport(&self, w: u32, h: u32) {
        self.handle.set_viewport(w, h);
    }

    /// Name of the rendition being watched.
    pub fn rendition(&self) -> &str {
        self.handle.rendition()
    }

    /// Latest decoded frame, if any is queued (older queued frames are dropped).
    pub fn current_frame(&mut self) -> Option<DecodedFrame> {
        self.video_frames.current_frame()
    }

    /// Decoder loop: receive packets, track viewport changes, decode, and
    /// forward every produced frame until the input closes or shutdown fires.
    pub(crate) fn run_loop(
        shutdown: &CancellationToken,
        mut input_rx: mpsc::Receiver<hang::Frame>,
        output_tx: mpsc::Sender<DecodedFrame>,
        mut viewport_watcher: n0_watcher::Direct<(u32, u32)>,
        mut decoder: impl VideoDecoder,
    ) -> Result<(), anyhow::Error> {
        loop {
            if shutdown.is_cancelled() {
                break;
            }
            let Some(packet) = input_rx.blocking_recv() else {
                break;
            };
            if viewport_watcher.update() {
                let (w, h) = viewport_watcher.peek();
                decoder.set_viewport(*w, *h);
            }
            let t = Instant::now();
            decoder
                .push_packet(packet)
                .context("failed to push packet")?;
            trace!(t=?t.elapsed(), "videodec: push_packet");
            while let Some(frame) = decoder.pop_frame().context("failed to pop frame")? {
                trace!(t=?t.elapsed(), "videodec: pop frame");
                if output_tx.blocking_send(frame).is_err() {
                    break;
                }
                trace!(t=?t.elapsed(), "videodec: tx");
            }
        }
        Ok(())
    }
}
/// Pumps frames from a MoQ track consumer into a channel until the track
/// ends, errors, or the receiving side is dropped.
async fn forward_frames(mut track: hang::TrackConsumer, sender: mpsc::Sender<hang::Frame>) {
    loop {
        let frame = match track.read_frame().await {
            Ok(Some(frame)) => frame,
            // End of track: stop forwarding.
            Ok(None) => break,
            Err(err) => {
                warn!("failed to read frame: {err:?}");
                break;
            }
        };
        // Receiver dropped: stop forwarding.
        if sender.send(frame).await.is_err() {
            break;
        }
    }
}
/// A remote audio/video broadcast together with its decoded local tracks.
pub struct AvRemoteTrack {
    /// The underlying broadcast subscription.
    pub broadcast: SubscribeBroadcast,
    /// Decoded video track, if the broadcast offers one that opened successfully.
    pub video: Option<WatchTrack>,
    /// Decoded audio track, if the broadcast offers one that opened successfully.
    pub audio: Option<AudioTrack>,
}
impl AvRemoteTrack {
    /// Subscribes to the broadcast's audio and video tracks using the decoder
    /// pair `D`. A track that is missing or fails to open is logged and left
    /// as `None` rather than failing the whole subscription.
    pub fn new<D: Decoders>(
        broadcast: SubscribeBroadcast,
        audio_out: impl AudioSink,
        playback_config: PlaybackConfig,
    ) -> Result<Self> {
        // Open the audio track before the video track.
        let audio = match broadcast.listen_with::<D::Audio>(playback_config.quality, audio_out) {
            Ok(track) => Some(track),
            Err(err) => {
                tracing::warn!("no audio track: {err}");
                None
            }
        };
        let video = match broadcast
            .watch_with::<D::Video>(&playback_config.decode_config, playback_config.quality)
        {
            Ok(track) => Some(track),
            Err(err) => {
                tracing::warn!("no video track: {err}");
                None
            }
        };
        Ok(Self {
            broadcast,
            audio,
            video,
        })
    }
}

View file

@ -0,0 +1,12 @@
/// Spawn a named OS thread and panic if spawning fails.
///
/// `name` is attached to the thread (visible in debuggers and profilers).
/// The closure's return value is available through the returned
/// [`std::thread::JoinHandle`].
///
/// # Panics
/// Panics if the OS refuses to spawn the thread (e.g. resource exhaustion).
pub fn spawn_thread<F, T>(name: impl ToString, f: F) -> std::thread::JoinHandle<T>
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    let name_str = name.to_string();
    std::thread::Builder::new()
        .name(name_str.clone())
        .spawn(f)
        // `unwrap_or_else` defers building the panic message to the failure
        // path; `expect(&format!(..))` allocated it on every call.
        .unwrap_or_else(|err| panic!("failed to spawn thread: {name_str}: {err}"))
}

View file

@ -0,0 +1,35 @@
[package]
name = "web-transport-iroh"
description = "WebTransport library for Iroh"
authors = ["Franz Heinzmann <frando@n0.computer>"]
repository = "https://github.com/n0-computer/iroh-live"
license = "MIT OR Apache-2.0"
version = "0.1.1"
edition = "2024"
keywords = ["quic", "http3", "webtransport", "iroh"]
categories = ["network-programming", "web-programming"]
[dependencies]
bytes = "1"
http = "1"
iroh = "0.96"
n0-error = "0.1.2"
n0-future = "0.3.1"
quinn = { package = "iroh-quinn", version = "0.16" }
thiserror = "2"
tokio = { version = "1", default-features = false, features = [
"io-util",
"macros",
] }
tracing = "0.1.41"
url = "2"
web-transport-proto = "0.3.0"
web-transport-trait = "0.3.0"
[dev-dependencies]
anyhow = "1"
clap = { version = "4", features = ["derive"] }
n0-tracing-test = "0.3.0"
tokio = { version = "1", features = ["full"] }

View file

@ -0,0 +1,9 @@
[![crates.io](https://img.shields.io/crates/v/web-transport-quinn)](https://crates.io/crates/web-transport-quinn)
[![docs.rs](https://img.shields.io/docsrs/web-transport-quinn)](https://docs.rs/web-transport-quinn)
[![discord](https://img.shields.io/discord/1124083992740761730)](https://discord.gg/FCYF3p99mr)
# web-transport-iroh
A wrapper around the Iroh API, implementing the [`web-transport-trait`](https://github.com/kixelated/web-transport/tree/main/web-transport-trait) traits.
Note that this does *not* actually implement WebTransport for iroh. Instead, it implements the WebTransport traits on a raw iroh QUIC connection. Thus, you can use an iroh connection wherever the `web-transport-trait` traits are expected (i.e. in hang). But there's no actual HTTP/3 WebTransport session being established; we just use the raw iroh QUIC connection directly.

View file

@ -0,0 +1,71 @@
use std::sync::Arc;
use iroh::{EndpointAddr, endpoint::{ConnectOptions, QuicTransportConfig}};
use url::Url;
use crate::{ALPN_H3, ClientError, Session};
/// A client for connecting to a WebTransport server.
pub struct Client {
    // The iroh endpoint used to dial peers.
    endpoint: iroh::Endpoint,
    // Transport configuration applied to every outgoing connection.
    config: QuicTransportConfig,
}
impl Client {
    /// Creates a client that uses the default transport configuration.
    pub fn new(endpoint: iroh::Endpoint) -> Self {
        Self::with_transport_config(endpoint, Default::default())
    }

    /// Creates a client from an endpoint and a transport config.
    pub fn with_transport_config(endpoint: iroh::Endpoint, config: QuicTransportConfig) -> Self {
        Self { endpoint, config }
    }

    /// Connect to a server over QUIC without HTTP/3.
    pub async fn connect_quic(
        &self,
        addr: impl Into<EndpointAddr>,
        alpn: &[u8],
    ) -> Result<Session, ClientError> {
        let conn = self.connect(addr, alpn).await?;
        Ok(Session::raw(conn))
    }

    /// Connect with a full HTTP/3 handshake and WebTransport semantics.
    ///
    /// Note that the url needs to have a `https:` scheme, otherwise the accepting side will
    /// fail to accept the connection.
    pub async fn connect_h3(
        &self,
        addr: impl Into<EndpointAddr>,
        url: Url,
    ) -> Result<Session, ClientError> {
        let conn = self.connect(addr, ALPN_H3.as_bytes()).await?;
        // Perform the WebTransport handshake over the connection we established.
        Session::connect_h3(conn, url).await
    }

    /// Dials `addr` with the configured transport options and waits for the
    /// connection to become established.
    async fn connect(
        &self,
        addr: impl Into<EndpointAddr>,
        alpn: &[u8],
    ) -> Result<iroh::endpoint::Connection, ClientError> {
        let opts = ConnectOptions::new().with_transport_config(self.config.clone());
        let connecting = self
            .endpoint
            .connect_with_opts(addr, alpn, opts)
            .await
            .map_err(|err| ClientError::Connect(Arc::new(err.into())))?;
        connecting
            .await
            .map_err(|err| ClientError::Connect(Arc::new(err.into())))
    }

    /// Closes the underlying endpoint.
    pub async fn close(&self) {
        self.endpoint.close().await;
    }
}

View file

@ -0,0 +1,125 @@
use web_transport_proto::{ConnectRequest, ConnectResponse, VarInt};
use thiserror::Error;
use url::Url;
/// Errors that can occur during the HTTP/3 CONNECT handshake.
#[derive(Error, Debug, Clone)]
pub enum ConnectError {
    /// The peer closed the QUIC stream before the handshake finished.
    #[error("quic stream was closed early")]
    UnexpectedEnd,
    /// The CONNECT request/response could not be encoded or decoded.
    #[error("protocol error: {0}")]
    ProtoError(#[from] web_transport_proto::ConnectError),
    /// The underlying QUIC connection failed.
    #[error("connection error")]
    ConnectionError(#[from] iroh::endpoint::ConnectionError),
    /// Reading from the handshake stream failed.
    #[error("read error")]
    ReadError(#[from] quinn::ReadError),
    /// Writing to the handshake stream failed.
    #[error("write error")]
    WriteError(#[from] quinn::WriteError),
    /// The server responded with a non-200 HTTP status.
    #[error("http error status: {0}")]
    ErrorStatus(http::StatusCode),
}
/// State of an HTTP/3 CONNECT exchange, kept alive for the session's duration.
pub struct Connect {
    // The request that was sent by the client.
    request: ConnectRequest,
    // A reference to the send/recv stream, so we don't close it until dropped.
    send: quinn::SendStream,
    #[allow(dead_code)]
    recv: quinn::RecvStream,
}
impl Connect {
    /// Accepts the HTTP/3 CONNECT request from a client.
    ///
    /// # Errors
    /// Fails if the stream closes early or the request is not a valid CONNECT.
    pub async fn accept(conn: &iroh::endpoint::Connection) -> Result<Self, ConnectError> {
        // Accept the stream that will be used to send the HTTP CONNECT request.
        // If they try to send any other type of HTTP request, we will error out.
        let (send, mut recv) = conn.accept_bi().await?;
        let request = web_transport_proto::ConnectRequest::read(&mut recv).await?;
        tracing::debug!("received CONNECT request: {request:?}");
        // The request was successfully decoded, so we can send a response.
        Ok(Self {
            request,
            send,
            recv,
        })
    }

    /// Called by the server to send a response to the client.
    pub async fn respond(&mut self, status: http::StatusCode) -> Result<(), ConnectError> {
        let resp = ConnectResponse { status };
        tracing::debug!("sending CONNECT response: {resp:?}");
        resp.write(&mut self.send).await?;
        Ok(())
    }

    /// Called by the client to send the CONNECT request and await the response.
    ///
    /// # Errors
    /// Fails if the exchange fails or the server responds with a non-200 status.
    pub async fn open(conn: &iroh::endpoint::Connection, url: Url) -> Result<Self, ConnectError> {
        // Create a new stream that will be used to send the CONNECT frame.
        let (mut send, mut recv) = conn.open_bi().await?;
        // Create a new CONNECT request that we'll send using HTTP/3
        let request = ConnectRequest { url };
        tracing::debug!("sending CONNECT request: {request:?}");
        request.write(&mut send).await?;
        let response = web_transport_proto::ConnectResponse::read(&mut recv).await?;
        tracing::debug!("received CONNECT response: {response:?}");
        // Throw an error if we didn't get a 200 OK.
        if response.status != http::StatusCode::OK {
            return Err(ConnectError::ErrorStatus(response.status));
        }
        Ok(Self {
            request,
            send,
            recv,
        })
    }

    /// The session ID is the stream ID of the CONNECT request.
    pub fn session_id(&self) -> VarInt {
        // We gotta convert from the Quinn VarInt to the (forked) WebTransport VarInt.
        // We don't use the quinn::VarInt because that would mean a quinn dependency in web-transport-proto
        let stream_id = quinn::VarInt::from(self.send.id());
        VarInt::try_from(stream_id.into_inner()).unwrap()
    }

    /// The URL in the CONNECT request.
    pub fn url(&self) -> &Url {
        &self.request.url
    }

    // Splits into the raw send/recv streams, giving up the keep-alive on drop.
    pub(super) fn into_inner(self) -> (quinn::SendStream, quinn::RecvStream) {
        (self.send, self.recv)
    }

    /// Keep reading from the control stream until it's closed, returning the
    /// close code and reason, or `(1, "capsule error")` on a malformed capsule.
    pub(crate) async fn run_closed(self) -> (u32, String) {
        let (_send, mut recv) = self.into_inner();
        loop {
            match web_transport_proto::Capsule::read(&mut recv).await {
                Ok(web_transport_proto::Capsule::CloseWebTransportSession { code, reason }) => {
                    return (code, reason);
                }
                // Unknown capsule types are tolerated and skipped.
                Ok(web_transport_proto::Capsule::Unknown { typ, payload }) => {
                    tracing::warn!("unknown capsule: type={typ} size={}", payload.len());
                }
                Err(_) => {
                    return (1, "capsule error".to_string());
                }
            }
        }
    }
}

View file

@ -0,0 +1,256 @@
use std::sync::Arc;
use n0_error::stack_error;
use thiserror::Error;
use crate::{ConnectError, SettingsError};
/// An error returned when connecting to a WebTransport endpoint.
#[stack_error(derive, from_sources)]
#[derive(Clone)]
pub enum ClientError {
    /// The peer closed a stream before the handshake finished.
    #[error("unexpected end of stream")]
    UnexpectedEnd,
    /// Dialing the peer failed.
    #[error("failed to connect")]
    Connect(#[error(source)] Arc<iroh::endpoint::ConnectError>),
    /// The established connection failed.
    #[error("connection failed")]
    Connection(#[error(source, std_err)] iroh::endpoint::ConnectionError),
    /// Writing to a handshake stream failed.
    #[error("failed to write")]
    WriteError(#[error(source, std_err)] quinn::WriteError),
    /// Reading from a handshake stream failed.
    #[error("failed to read")]
    ReadError(#[error(source, std_err)] quinn::ReadError),
    /// The HTTP/3 SETTINGS exchange failed.
    #[error("failed to exchange h3 settings")]
    SettingsError(#[error(from, source, std_err)] SettingsError),
    /// The HTTP/3 CONNECT exchange failed.
    #[error("failed to exchange h3 connect")]
    HttpError(#[error(from, source, std_err)] ConnectError),
    /// The provided URL could not be used for a WebTransport session.
    #[error("invalid URL")]
    InvalidUrl,
    /// The local endpoint failed to bind.
    #[error("endpoint failed to bind")]
    Bind(#[error(source)] Arc<iroh::endpoint::BindError>),
}
/// Errors returned by [`crate::Session`], split based on whether they are underlying QUIC errors or WebTransport errors.
#[derive(Clone, Error, Debug)]
pub enum SessionError {
    /// The underlying QUIC connection failed.
    #[error("connection error: {0}")]
    ConnectionError(#[from] iroh::endpoint::ConnectionError),
    /// A WebTransport-level protocol error occurred.
    #[error("webtransport error: {0}")]
    WebTransportError(#[from] WebTransportError),
    /// Sending a datagram failed.
    #[error("send datagram error: {0}")]
    SendDatagramError(#[from] quinn::SendDatagramError),
}
/// An error that can occur when reading/writing the WebTransport stream header.
#[derive(Clone, Error, Debug)]
pub enum WebTransportError {
    /// The session was closed with the given code and reason.
    #[error("closed: code={0} reason={1}")]
    Closed(u32, String),
    /// A stream or datagram referenced a session ID we don't know about.
    #[error("unknown session")]
    UnknownSession,
    /// Reading the stream header failed.
    #[error("read error: {0}")]
    ReadError(#[from] quinn::ReadExactError),
    /// Writing the stream header failed.
    #[error("write error: {0}")]
    WriteError(#[from] quinn::WriteError),
}
/// An error when writing to [`crate::SendStream`]. Similar to [`quinn::WriteError`].
#[derive(Clone, Error, Debug)]
pub enum WriteError {
    /// The peer sent STOP_SENDING with a valid WebTransport error code.
    #[error("STOP_SENDING: {0}")]
    Stopped(u32),
    /// The peer sent STOP_SENDING with a code outside the WebTransport error space.
    #[error("invalid STOP_SENDING: {0}")]
    InvalidStopped(quinn::VarInt),
    /// The session itself failed.
    #[error("session error: {0}")]
    SessionError(#[from] SessionError),
    /// The stream was already finished or reset locally.
    #[error("stream closed")]
    ClosedStream,
}
impl From<quinn::WriteError> for WriteError {
    /// Translates a Quinn write error, mapping HTTP/3 stop codes back into
    /// WebTransport error codes where possible.
    fn from(e: quinn::WriteError) -> Self {
        match e {
            quinn::WriteError::Stopped(code) => {
                web_transport_proto::error_from_http3(code.into_inner())
                    .map_or(WriteError::InvalidStopped(code), WriteError::Stopped)
            }
            quinn::WriteError::ClosedStream => WriteError::ClosedStream,
            quinn::WriteError::ConnectionLost(e) => WriteError::SessionError(e.into()),
            quinn::WriteError::ZeroRttRejected => unreachable!("0-RTT not supported"),
        }
    }
}
/// An error when reading from [`crate::RecvStream`]. Similar to [`quinn::ReadError`].
#[derive(Clone, Error, Debug)]
pub enum ReadError {
    /// The session itself failed.
    #[error("session error: {0}")]
    SessionError(#[from] SessionError),
    /// The peer reset the stream with a valid WebTransport error code.
    #[error("RESET_STREAM: {0}")]
    Reset(u32),
    /// The peer reset the stream with a code outside the WebTransport error space.
    #[error("invalid RESET_STREAM: {0}")]
    InvalidReset(quinn::VarInt),
    /// The stream was already closed locally.
    #[error("stream already closed")]
    ClosedStream,
}
impl From<quinn::ReadError> for ReadError {
    /// Translates a Quinn read error, mapping HTTP/3 reset codes back into
    /// WebTransport error codes where possible.
    fn from(value: quinn::ReadError) -> Self {
        match value {
            quinn::ReadError::Reset(code) => {
                web_transport_proto::error_from_http3(code.into_inner())
                    .map_or(ReadError::InvalidReset(code), ReadError::Reset)
            }
            quinn::ReadError::ConnectionLost(e) => ReadError::SessionError(e.into()),
            quinn::ReadError::ClosedStream => ReadError::ClosedStream,
            quinn::ReadError::ZeroRttRejected => unreachable!("0-RTT not supported"),
        }
    }
}
/// An error returned by [`crate::RecvStream::read_exact`]. Similar to [`quinn::ReadExactError`].
#[derive(Clone, Error, Debug)]
pub enum ReadExactError {
    /// The stream finished before the buffer was filled; holds the bytes remaining.
    #[error("finished early")]
    FinishedEarly(usize),
    /// Reading from the stream failed.
    #[error("read error: {0}")]
    ReadError(#[from] ReadError),
}
impl From<quinn::ReadExactError> for ReadExactError {
fn from(e: quinn::ReadExactError) -> Self {
match e {
quinn::ReadExactError::FinishedEarly(size) => ReadExactError::FinishedEarly(size),
quinn::ReadExactError::ReadError(e) => ReadExactError::ReadError(e.into()),
}
}
}
/// An error returned by [`crate::RecvStream::read_to_end`]. Similar to [`quinn::ReadToEndError`].
#[derive(Clone, Error, Debug)]
pub enum ReadToEndError {
    /// The stream was longer than the requested size limit.
    #[error("too long")]
    TooLong,
    /// Reading from the stream failed.
    #[error("read error: {0}")]
    ReadError(#[from] ReadError),
}
impl From<quinn::ReadToEndError> for ReadToEndError {
fn from(e: quinn::ReadToEndError) -> Self {
match e {
quinn::ReadToEndError::TooLong => ReadToEndError::TooLong,
quinn::ReadToEndError::Read(e) => ReadToEndError::ReadError(e.into()),
}
}
}
/// An error indicating the stream was already closed.
#[derive(Clone, Error, Debug)]
#[error("stream closed")]
pub struct ClosedStream;
// Quinn's marker error converts 1:1 into ours.
impl From<quinn::ClosedStream> for ClosedStream {
    fn from(_: quinn::ClosedStream) -> Self {
        ClosedStream
    }
}
/// An error returned when receiving a new WebTransport session.
#[stack_error(derive, from_sources)]
#[derive(Clone)]
pub enum ServerError {
    /// The peer closed a stream before the handshake finished.
    #[error("unexpected end of stream")]
    UnexpectedEnd,
    /// The established connection failed.
    #[error("connection failed")]
    Connection(#[error(source, std_err)] iroh::endpoint::ConnectionError),
    /// The connection failed while still being established.
    #[error("connection failed during handshake")]
    Connecting(#[error(source)] Arc<iroh::endpoint::ConnectingError>),
    /// Writing to a handshake stream failed.
    #[error("failed to write")]
    WriteError(#[error(source, std_err)] quinn::WriteError),
    /// Reading from a handshake stream failed.
    #[error("failed to read")]
    ReadError(#[error(source, std_err)] quinn::ReadError),
    /// A lower-level I/O error occurred.
    #[error("io error")]
    IoError(#[error(source)] Arc<std::io::Error>),
    /// The local endpoint failed to bind.
    #[error("failed to bind endpoint")]
    Bind(#[error(source)] Arc<iroh::endpoint::BindError>),
    /// The HTTP/3 CONNECT exchange failed.
    #[error("failed to exchange h3 connect")]
    HttpError(#[error(source, from, std_err)] ConnectError),
    /// The HTTP/3 SETTINGS exchange failed.
    #[error("failed to exchange h3 settings")]
    SettingsError(#[error(source, from, std_err)] SettingsError),
}
impl web_transport_trait::Error for SessionError {
    /// Reports the WebTransport close code/reason, if the session was closed
    /// at the WebTransport layer (rather than by a raw QUIC failure).
    fn session_error(&self) -> Option<(u32, String)> {
        match self {
            SessionError::WebTransportError(WebTransportError::Closed(code, reason)) => {
                Some((*code, reason.to_string()))
            }
            _ => None,
        }
    }
}
impl web_transport_trait::Error for WriteError {
    /// Delegates to the inner session error, if this write failed because the
    /// whole session failed.
    fn session_error(&self) -> Option<(u32, String)> {
        match self {
            WriteError::SessionError(e) => e.session_error(),
            _ => None,
        }
    }

    /// Reports the STOP_SENDING code, if the peer stopped this stream.
    fn stream_error(&self) -> Option<u32> {
        if let WriteError::Stopped(code) = self {
            Some(*code)
        } else {
            None
        }
    }
}
impl web_transport_trait::Error for ReadError {
    /// Delegates to the inner session error, if this read failed because the
    /// whole session failed.
    fn session_error(&self) -> Option<(u32, String)> {
        match self {
            ReadError::SessionError(e) => e.session_error(),
            _ => None,
        }
    }

    /// Reports the RESET_STREAM code, if the peer reset this stream.
    fn stream_error(&self) -> Option<u32> {
        if let ReadError::Reset(code) = self {
            Some(*code)
        } else {
            None
        }
    }
}

View file

@ -0,0 +1,56 @@
//! WebTransport is a protocol for client-server communication over QUIC.
//! It's [available in the browser](https://caniuse.com/webtransport) as an alternative to HTTP and WebSockets.
//!
//! WebTransport is layered on top of HTTP/3 which is then layered on top of QUIC.
//! This library hides that detail and tries to expose only the QUIC API, delegating as much as possible to the underlying implementation.
//! See the [Quinn documentation](https://docs.rs/quinn/latest/quinn/) for more documentation.
//!
//! QUIC provides two primary APIs:
//!
//! # Streams
//! QUIC streams are ordered, reliable, flow-controlled, and optionally bidirectional.
//! Both endpoints can create and close streams (including an error code) with no overhead.
//! You can think of them as TCP connections, but shared over a single QUIC connection.
//!
//! # Datagrams
//! QUIC datagrams are unordered, unreliable, and not flow-controlled.
//! Both endpoints can send datagrams below the MTU size (~1.2kb minimum) and they might arrive out of order or not at all.
//! They are basically UDP packets, except they are encrypted and congestion controlled.
//!
//! # Limitations
//! WebTransport is able to be pooled with HTTP/3 and multiple WebTransport sessions.
//! This crate avoids that complexity, doing the bare minimum to support a single WebTransport session that owns the entire QUIC connection.
//! If you want to support HTTP/3 on the same host/port, you should use another crate (ex. `h3-webtransport`).
//! If you want to support multiple WebTransport sessions over the same QUIC connection... you should just dial a new QUIC connection instead.
// External
mod client;
mod connect;
mod error;
mod recv;
mod send;
mod server;
mod session;
mod settings;
#[cfg(test)]
mod tests;
pub use client::*;
pub use connect::*;
pub use error::*;
pub use recv::*;
pub use send::*;
pub use server::*;
pub use session::*;
pub use settings::*;
/// The HTTP/3 ALPN protocol identifier, required when negotiating a QUIC
/// connection for WebTransport.
pub const ALPN_H3: &str = "h3";
/// Re-export the http crate because it's in the public API.
pub use http;
pub use iroh;
/// Re-export the underlying QUIC implementation.
pub use quinn;
/// Re-export the generic WebTransport implementation.
pub use web_transport_trait as generic;

View file

@ -0,0 +1,111 @@
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use crate::{ReadError, ReadExactError, ReadToEndError, SessionError};
/// A stream that can be used to receive bytes. See [`quinn::RecvStream`].
#[derive(Debug)]
pub struct RecvStream {
    // The wrapped Quinn receive stream.
    inner: quinn::RecvStream,
}
impl RecvStream {
    // Internal constructor; streams are handed out by the session.
    pub(crate) fn new(stream: quinn::RecvStream) -> Self {
        Self { inner: stream }
    }

    /// Tell the other end to stop sending data with the given error code. See [`quinn::RecvStream::stop`].
    /// This is a u32 with WebTransport since it shares the error space with HTTP/3.
    pub fn stop(&mut self, code: u32) -> Result<(), quinn::ClosedStream> {
        let code = web_transport_proto::error_to_http3(code);
        let code = quinn::VarInt::try_from(code).unwrap();
        self.inner.stop(code)
    }

    // Unfortunately, we have to wrap ReadError for a bunch of functions.

    /// Read some data into the buffer and return the amount read. See [`quinn::RecvStream::read`].
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<Option<usize>, ReadError> {
        self.inner.read(buf).await.map_err(Into::into)
    }

    /// Fill the entire buffer with data. See [`quinn::RecvStream::read_exact`].
    pub async fn read_exact(&mut self, buf: &mut [u8]) -> Result<(), ReadExactError> {
        self.inner.read_exact(buf).await.map_err(Into::into)
    }

    /// Read a chunk of data from the stream. See [`quinn::RecvStream::read_chunk`].
    pub async fn read_chunk(
        &mut self,
        max_length: usize,
    ) -> Result<Option<quinn::Chunk>, ReadError> {
        self.inner
            .read_chunk(max_length)
            .await
            .map_err(Into::into)
    }

    /// Read chunks of data from the stream. See [`quinn::RecvStream::read_chunks`].
    pub async fn read_chunks(&mut self, bufs: &mut [Bytes]) -> Result<Option<usize>, ReadError> {
        self.inner.read_chunks(bufs).await.map_err(Into::into)
    }

    /// Read until the end of the stream or the limit is hit. See [`quinn::RecvStream::read_to_end`].
    pub async fn read_to_end(&mut self, size_limit: usize) -> Result<Vec<u8>, ReadToEndError> {
        self.inner.read_to_end(size_limit).await.map_err(Into::into)
    }

    /// Block until the stream has been reset and return the error code. See [`quinn::RecvStream::received_reset`].
    ///
    /// Unlike Quinn, this returns None if the code is not a valid WebTransport error code.
    /// Also unlike Quinn, this returns a SessionError, not a ResetError, because 0-RTT is not supported.
    pub async fn received_reset(&mut self) -> Result<Option<u32>, SessionError> {
        match self.inner.received_reset().await {
            Ok(None) => Ok(None),
            // A peer may reset with an arbitrary code; codes outside the
            // WebTransport error space map to None instead of panicking via
            // unwrap, matching the behavior of `SendStream::stopped`.
            Ok(Some(code)) => Ok(web_transport_proto::error_from_http3(code.into_inner())),
            Err(quinn::ResetError::ConnectionLost(e)) => Err(e.into()),
            Err(quinn::ResetError::ZeroRttRejected) => unreachable!("0-RTT not supported"),
        }
    }

    // We purposely don't expose the stream ID or 0RTT because it's not valid with WebTransport
}
impl tokio::io::AsyncRead for RecvStream {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf,
    ) -> Poll<io::Result<()>> {
        // Delegate directly to Quinn's AsyncRead implementation.
        Pin::new(&mut self.inner).poll_read(cx, buf)
    }
}
impl web_transport_trait::RecvStream for RecvStream {
type Error = ReadError;
fn stop(&mut self, code: u32) {
Self::stop(self, code).ok();
}
async fn read(&mut self, dst: &mut [u8]) -> Result<Option<usize>, Self::Error> {
self.read(dst).await
}
async fn read_chunk(&mut self, max: usize) -> Result<Option<Bytes>, Self::Error> {
self.read_chunk(max)
.await
.map(|r| r.map(|chunk| chunk.bytes))
}
async fn closed(&mut self) -> Result<(), Self::Error> {
self.received_reset().await?;
Ok(())
}
}

View file

@ -0,0 +1,142 @@
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
use bytes::{Buf, Bytes};
use crate::{ClosedStream, SessionError, WriteError};
/// A stream that can be used to send bytes. See [`quinn::SendStream`].
///
/// This wrapper is mainly needed for error codes, which is unfortunate.
/// WebTransport uses u32 error codes and they're mapped in a reserved HTTP/3 error space.
#[derive(Debug)]
pub struct SendStream {
    // The wrapped Quinn send stream.
    stream: quinn::SendStream,
}
impl SendStream {
    // Internal constructor; streams are handed out by the session.
    pub(crate) fn new(stream: quinn::SendStream) -> Self {
        Self { stream }
    }

    /// Abruptly reset the stream with the provided error code. See [`quinn::SendStream::reset`].
    /// This is a u32 with WebTransport because we share the error space with HTTP/3.
    pub fn reset(&mut self, code: u32) -> Result<(), ClosedStream> {
        let code = web_transport_proto::error_to_http3(code);
        let code = quinn::VarInt::try_from(code).unwrap();
        self.stream.reset(code).map_err(Into::into)
    }

    /// Wait until the stream has been stopped and return the error code. See [`quinn::SendStream::stopped`].
    ///
    /// Unlike Quinn, this returns None if the code is not a valid WebTransport error code.
    /// Also unlike Quinn, this returns a SessionError, not a StoppedError, because 0-RTT is not supported.
    pub async fn stopped(&mut self) -> Result<Option<u32>, SessionError> {
        match self.stream.stopped().await {
            Ok(Some(code)) => Ok(web_transport_proto::error_from_http3(code.into_inner())),
            Ok(None) => Ok(None),
            Err(quinn::StoppedError::ConnectionLost(e)) => Err(e.into()),
            Err(quinn::StoppedError::ZeroRttRejected) => unreachable!("0-RTT not supported"),
        }
    }

    // Unfortunately, we have to wrap WriteError for a bunch of functions.

    /// Write some data to the stream, returning the size written. See [`quinn::SendStream::write`].
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize, WriteError> {
        self.stream.write(buf).await.map_err(Into::into)
    }

    /// Write all of the data to the stream. See [`quinn::SendStream::write_all`].
    pub async fn write_all(&mut self, buf: &[u8]) -> Result<(), WriteError> {
        self.stream.write_all(buf).await.map_err(Into::into)
    }

    /// Write chunks of data to the stream. See [`quinn::SendStream::write_chunks`].
    pub async fn write_chunks(&mut self, bufs: &mut [Bytes]) -> Result<quinn::Written, WriteError> {
        self.stream.write_chunks(bufs).await.map_err(Into::into)
    }

    /// Write a chunk of data to the stream. See [`quinn::SendStream::write_chunk`].
    pub async fn write_chunk(&mut self, buf: Bytes) -> Result<(), WriteError> {
        self.stream.write_chunk(buf).await.map_err(Into::into)
    }

    /// Write all of the chunks of data to the stream. See [`quinn::SendStream::write_all_chunks`].
    pub async fn write_all_chunks(&mut self, bufs: &mut [Bytes]) -> Result<(), WriteError> {
        self.stream.write_all_chunks(bufs).await.map_err(Into::into)
    }

    /// Mark the stream as finished, such that no more data can be written. See [`quinn::SendStream::finish`].
    pub fn finish(&mut self) -> Result<(), ClosedStream> {
        self.stream.finish().map_err(Into::into)
    }

    /// Set the stream's priority relative to other streams. See [`quinn::SendStream::set_priority`].
    pub fn set_priority(&self, order: i32) -> Result<(), ClosedStream> {
        self.stream.set_priority(order).map_err(Into::into)
    }

    /// Get the stream's priority. See [`quinn::SendStream::priority`].
    pub fn priority(&self) -> Result<i32, ClosedStream> {
        self.stream.priority().map_err(Into::into)
    }
}
impl tokio::io::AsyncWrite for SendStream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // We have to use this syntax because quinn added its own poll_write method.
        tokio::io::AsyncWrite::poll_write(Pin::new(&mut self.stream), cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        Pin::new(&mut self.stream).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        Pin::new(&mut self.stream).poll_shutdown(cx)
    }
}
impl web_transport_trait::SendStream for SendStream {
    type Error = WriteError;

    fn set_priority(&mut self, order: u8) {
        // Trait priorities are u8; widen to Quinn's i32 and ignore failures
        // on an already-closed stream.
        self.stream.set_priority(order.into()).ok();
    }

    fn reset(&mut self, code: u32) {
        // Ignore failures on an already-closed stream.
        Self::reset(self, code).ok();
    }

    fn finish(&mut self) -> Result<(), Self::Error> {
        Self::finish(self).map_err(|_| WriteError::ClosedStream)?;
        Ok(())
    }

    async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
        Self::write(self, buf).await
    }

    async fn write_buf<B: Buf + Send>(&mut self, buf: &mut B) -> Result<usize, Self::Error> {
        // This can avoid making a copy when Buf is Bytes, as Quinn will allocate anyway.
        let size = buf.chunk().len();
        let chunk = buf.copy_to_bytes(size);
        self.write_chunk(chunk).await?;
        Ok(size)
    }

    async fn write_chunk(&mut self, chunk: Bytes) -> Result<(), Self::Error> {
        // Resolves to the inherent `SendStream::write_chunk` (inherent methods
        // take precedence over trait methods), so this is not recursive.
        self.write_chunk(chunk).await
    }

    async fn closed(&mut self) -> Result<(), Self::Error> {
        self.stopped().await?;
        Ok(())
    }
}

View file

@ -0,0 +1,76 @@
use url::Url;
use crate::{Connect, ServerError, Session, Settings};
/// A QUIC-only WebTransport handshake, awaiting server decision.
pub struct QuicRequest {
    // The incoming QUIC connection; consumed on accept/reject.
    conn: iroh::endpoint::Connection,
}
/// An H3 WebTransport handshake, SETTINGS exchanged and CONNECT accepted,
/// awaiting server decision (respond OK / reject).
pub struct H3Request {
    // The incoming QUIC connection.
    conn: iroh::endpoint::Connection,
    // Settings negotiated during the H3 SETTINGS exchange.
    settings: Settings,
    // The pending CONNECT exchange; a response is sent on ok/close.
    connect: Connect,
}
impl QuicRequest {
/// Accept a new QUIC-only WebTransport session from a client.
pub fn accept(conn: iroh::endpoint::Connection) -> Self {
Self { conn }
}
pub fn conn(&self) -> &iroh::endpoint::Connection {
&self.conn
}
/// Accept the session.
pub fn ok(self) -> Session {
Session::raw(self.conn)
}
/// Reject the session.
pub fn close(self, status: http::StatusCode) {
self.conn
.close(status.as_u16().into(), status.as_str().as_bytes());
}
}
impl H3Request {
    /// Accept a new H3 WebTransport session from a client.
    ///
    /// Performs the SETTINGS exchange and reads the CONNECT request, but
    /// defers the CONNECT response until [`Self::ok`] or [`Self::close`].
    pub async fn accept(conn: iroh::endpoint::Connection) -> Result<Self, ServerError> {
        // Perform the H3 handshake by sending/receiving SETTINGS frames.
        let settings = Settings::connect(&conn).await?;
        // Accept the CONNECT request but don't send a response yet.
        let connect = Connect::accept(&conn).await?;
        Ok(Self {
            conn,
            settings,
            connect,
        })
    }

    /// Returns the URL provided by the client.
    pub fn url(&self) -> &Url {
        self.connect.url()
    }

    /// Returns the underlying QUIC connection.
    pub fn conn(&self) -> &iroh::endpoint::Connection {
        &self.conn
    }

    /// Accept the session, returning a 200 OK.
    pub async fn ok(mut self) -> Result<Session, ServerError> {
        self.connect.respond(http::StatusCode::OK).await?;
        Ok(Session::new_h3(self.conn, self.settings, self.connect))
    }

    /// Reject the session, returning your favorite HTTP status code.
    pub async fn close(mut self, status: http::StatusCode) -> Result<(), ServerError> {
        self.connect.respond(status).await?;
        Ok(())
    }
}

View file

@ -0,0 +1,540 @@
use std::{
fmt,
future::{Future, poll_fn},
io::Cursor,
ops::Deref,
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll, ready},
};
use bytes::{Bytes, BytesMut};
use iroh::endpoint::Connection;
use n0_future::{
FuturesUnordered,
stream::{Stream, StreamExt},
};
use url::Url;
use crate::{
ClientError, Connect, RecvStream, SendStream, SessionError, Settings, WebTransportError,
};
use web_transport_proto::{Frame, StreamUni, VarInt};
/// An established WebTransport session, acting like a full QUIC connection. See [`iroh::endpoint::Connection`].
///
/// It is important to remember that WebTransport is layered on top of QUIC:
/// 1. Each stream starts with a few bytes identifying the stream type and session ID.
/// 2. Error codes are encoded with the session ID, so they aren't full QUIC error codes.
/// 3. Stream IDs may have gaps in them, used by HTTP/3 transparent to the application.
///
/// Deref is used to expose non-overloaded methods on [`iroh::endpoint::Connection`].
/// These should be safe to use with WebTransport, but file a PR if you find one that isn't.
#[derive(Clone)]
pub struct Session {
    // The underlying QUIC connection.
    conn: Connection,
    // Present when established via an H3 handshake; `None` for raw QUIC sessions.
    h3: Option<H3SessionState>,
}
impl Session {
    /// Create a new session from a raw QUIC connection.
    ///
    /// This is used to pretend like a QUIC connection is a WebTransport session.
    /// It's a hack, but it makes it much easier to support WebTransport and raw QUIC simultaneously.
    pub fn raw(conn: Connection) -> Self {
        Self { conn, h3: None }
    }
    /// Connect using an established QUIC connection if you want to create the connection yourself.
    /// This will only work with a brand new QUIC connection using the HTTP/3 ALPN.
    pub async fn connect_h3(conn: Connection, url: Url) -> Result<Session, ClientError> {
        // Perform the H3 handshake by sending/receiving SETTINGS frames.
        let settings = Settings::connect(&conn).await?;
        // Send the HTTP/3 CONNECT request.
        let connect = Connect::open(&conn, url).await?;
        Ok(Self::new_h3(conn, settings, connect))
    }
    /// Build a session from an already-completed H3 handshake
    /// (SETTINGS exchanged and CONNECT established).
    pub fn new_h3(conn: Connection, settings: Settings, connect: Connect) -> Self {
        let h3 = H3SessionState::connect(conn.clone(), settings, &connect);
        let this = Session { conn, h3: Some(h3) };
        // Run a background task to check if the connect stream is closed.
        let this2 = this.clone();
        tokio::spawn(async move {
            let (code, reason) = connect.run_closed().await;
            if this2.conn().close_reason().is_none() {
                // TODO We shouldn't be closing the QUIC connection with the same error.
                this2.close(code, reason.as_bytes());
            }
        });
        this
    }
    /// Returns the underlying QUIC connection.
    pub fn conn(&self) -> &Connection {
        &self.conn
    }
    /// Returns the URL from the CONNECT request, if this is an H3 session.
    pub fn url(&self) -> Option<&Url> {
        self.h3.as_ref().map(|s| &s.url)
    }
    /// Accept a new unidirectional stream. See [`iroh::endpoint::Connection::accept_uni`].
    pub async fn accept_uni(&self) -> Result<RecvStream, SessionError> {
        if let Some(h3) = &self.h3 {
            // H3 mode: delegate to the session's shared accept state.
            poll_fn(|cx| h3.accept.lock().unwrap().poll_accept_uni(cx)).await
        } else {
            // Raw QUIC mode: streams are accepted directly.
            self.conn
                .accept_uni()
                .await
                .map(RecvStream::new)
                .map_err(Into::into)
        }
    }
    /// Accept a new bidirectional stream. See [`iroh::endpoint::Connection::accept_bi`].
    pub async fn accept_bi(&self) -> Result<(SendStream, RecvStream), SessionError> {
        if let Some(h3) = &self.h3 {
            // H3 mode: delegate to the session's shared accept state.
            poll_fn(|cx| h3.accept.lock().unwrap().poll_accept_bi(cx)).await
        } else {
            // Raw QUIC mode: streams are accepted directly.
            self.conn
                .accept_bi()
                .await
                .map(|(send, recv)| (SendStream::new(send), RecvStream::new(recv)))
                .map_err(Into::into)
        }
    }
    /// Open a new unidirectional stream. See [`iroh::endpoint::Connection::open_uni`].
    pub async fn open_uni(&self) -> Result<SendStream, SessionError> {
        let mut send = self.conn.open_uni().await?;
        if let Some(h3) = self.h3.as_ref() {
            // H3 mode: prepend the WebTransport stream header before user data.
            write_full_with_max_prio(&mut send, &h3.header_uni).await?;
        }
        Ok(SendStream::new(send))
    }
    /// Open a new bidirectional stream. See [`iroh::endpoint::Connection::open_bi`].
    pub async fn open_bi(&self) -> Result<(SendStream, RecvStream), SessionError> {
        let (mut send, recv) = self.conn.open_bi().await?;
        if let Some(h3) = self.h3.as_ref() {
            // H3 mode: prepend the WebTransport stream header before user data.
            write_full_with_max_prio(&mut send, &h3.header_bi).await?;
        }
        Ok((SendStream::new(send), RecvStream::new(recv)))
    }
/// Asynchronously receives an application datagram from the remote peer.
///
/// This method is used to receive an application datagram sent by the remote
/// peer over the connection.
/// It waits for a datagram to become available and returns the received bytes.
/// For WebTransport (H3) sessions the leading session ID is validated and
/// stripped before the payload is returned.
pub async fn read_datagram(&self) -> Result<Bytes, SessionError> {
    let mut payload = self
        .conn
        .read_datagram()
        .await
        .map_err(SessionError::from)?;
    // Raw QUIC sessions carry no extra framing: return the datagram as-is.
    let Some(h3) = self.h3.as_ref() else {
        return Ok(payload);
    };
    // H3 datagrams are prefixed with the session ID; check it matches ours.
    let mut cursor = Cursor::new(&payload);
    let actual_id =
        VarInt::decode(&mut cursor).map_err(|_| WebTransportError::UnknownSession)?;
    if actual_id != h3.session_id {
        return Err(WebTransportError::UnknownSession.into());
    }
    // Strip the session ID prefix; keep only the application bytes.
    Ok(payload.split_off(cursor.position() as usize))
}
/// Sends an application datagram to the remote peer.
///
/// Datagrams are unreliable and may be dropped or delivered out of order.
/// The data must be smaller than [`max_datagram_size`](Self::max_datagram_size).
pub fn send_datagram(&self, data: Bytes) -> Result<(), SessionError> {
    let payload = match self.h3.as_ref() {
        Some(h3) => {
            // Unfortunately, we need to allocate/copy each datagram because of the Quinn API.
            // Pls go +1 if you care: https://github.com/quinn-rs/quinn/issues/1724
            let mut framed = BytesMut::with_capacity(h3.header_datagram.len() + data.len());
            // Prepend the datagram with the header indicating the session ID.
            framed.extend_from_slice(&h3.header_datagram);
            framed.extend_from_slice(&data);
            framed.into()
        }
        // Raw QUIC: send the bytes unmodified.
        None => data,
    };
    self.conn.send_datagram(payload)?;
    Ok(())
}
/// Computes the maximum size of datagrams that may be passed to
/// [`send_datagram`](Self::send_datagram).
pub fn max_datagram_size(&self) -> usize {
    let mtu = self
        .conn
        .max_datagram_size()
        .expect("datagram support is required");
    match self.h3.as_ref() {
        // H3 sessions spend part of the budget on the session-ID header.
        Some(h3) => mtu.saturating_sub(h3.header_datagram.len()),
        None => mtu,
    }
}
/// Immediately close the connection with an error code and reason. See [`iroh::endpoint::Connection::close`].
pub fn close(&self, code: u32, reason: &[u8]) {
    let quic_code = match &self.h3 {
        // WebTransport application error codes must be translated into the
        // reserved HTTP/3 error-code range before closing the connection.
        Some(_) => web_transport_proto::error_to_http3(code)
            .try_into()
            .unwrap(),
        None => code.into(),
    };
    self.conn.close(quic_code, reason)
}
/// Wait until the session is closed, returning the error. See [`iroh::endpoint::Connection::closed`].
pub async fn closed(&self) -> SessionError {
    // Delegates to the underlying connection and converts the error type.
    self.conn.closed().await.into()
}
/// Return why the session was closed, or None if it's not closed. See [`iroh::endpoint::Connection::close_reason`].
pub fn close_reason(&self) -> Option<SessionError> {
    // Delegates to the underlying connection and converts the error type.
    self.conn.close_reason().map(Into::into)
}
}
// Writes `buf` to `send` at maximum stream priority, then restores priority 0.
//
// The header must not be queued behind lower-priority application data:
// it is what lets the peer determine the session ID, and there is no
// reliable reset to recover from a partially-written header.
async fn write_full_with_max_prio(
    send: &mut quinn::SendStream,
    buf: &[u8],
) -> Result<(), SessionError> {
    // Errors from set_priority are ignored on purpose: the stream may
    // already be finished/reset, in which case write_all reports the error.
    send.set_priority(i32::MAX).ok();
    let outcome = send.write_all(buf).await;
    // Always restore the default priority of 0, even on failure.
    send.set_priority(0).ok();
    match outcome {
        Ok(()) => Ok(()),
        Err(quinn::WriteError::ConnectionLost(err)) => Err(err.into()),
        Err(err) => Err(WebTransportError::WriteError(err).into()),
    }
}
// Allow calling `Connection` methods directly on a `Session`.
impl Deref for Session {
    type Target = Connection;
    fn deref(&self) -> &Self::Target {
        &self.conn
    }
}
// Forward Debug formatting to the underlying connection.
impl fmt::Debug for Session {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.conn.fmt(f)
    }
}
// Two sessions are equal iff they wrap the same QUIC connection,
// compared by the connection's stable ID.
impl PartialEq for Session {
    fn eq(&self, other: &Self) -> bool {
        self.conn.stable_id() == other.conn.stable_id()
    }
}
impl Eq for Session {}
// Extra per-session state kept only for WebTransport-over-HTTP/3 sessions
// (raw QUIC sessions have no such state; `Session.h3` is `None` for them).
#[derive(Clone)]
struct H3SessionState {
    // The URL from the CONNECT request that established this session.
    url: Url,
    // The session ID, as determined by the stream ID of the connect request.
    session_id: VarInt,
    // Cache the headers in front of each stream we open.
    header_uni: Vec<u8>,
    header_bi: Vec<u8>,
    header_datagram: Vec<u8>,
    // Keep a reference to the settings and connect stream to avoid closing them until dropped.
    #[allow(dead_code)]
    settings: Arc<Settings>,
    // The accept logic is stateful, so use an Arc<Mutex> to share it.
    accept: Arc<Mutex<H3SessionAccept>>,
}
impl H3SessionState {
    /// Builds the H3 session state from a completed CONNECT exchange.
    ///
    /// Pre-encodes the tiny headers that are written in front of every
    /// stream and datagram, so the hot paths only need a memcpy.
    fn connect(conn: Connection, settings: Settings, connect: &Connect) -> Self {
        // The session ID is the stream ID of the CONNECT request.
        let session_id = connect.session_id();
        // Helper: append the session ID after an already-encoded prefix.
        let with_session_id = |mut buf: Vec<u8>| {
            session_id.encode(&mut buf);
            buf
        };
        // Unidirectional streams start with the uni stream type.
        let mut uni_prefix = Vec::new();
        StreamUni::WEBTRANSPORT.encode(&mut uni_prefix);
        // Bidirectional streams start with the WebTransport frame type.
        let mut bi_prefix = Vec::new();
        Frame::WEBTRANSPORT.encode(&mut bi_prefix);
        Self {
            url: connect.url().clone(),
            session_id,
            header_uni: with_session_id(uni_prefix),
            header_bi: with_session_id(bi_prefix),
            // Datagrams carry only the session ID, no type prefix.
            header_datagram: with_session_id(Vec::new()),
            settings: Arc::new(settings),
            // Accept logic is stateful, so use an Arc<Mutex> to share it.
            accept: Arc::new(Mutex::new(H3SessionAccept::new(conn, session_id))),
        }
    }
}
// Type aliases just so clippy doesn't complain about the complexity.
// Endless stream of incoming unidirectional QUIC streams.
type AcceptUni =
    dyn Stream<Item = Result<quinn::RecvStream, iroh::endpoint::ConnectionError>> + Send;
// Endless stream of incoming bidirectional QUIC streams.
type AcceptBi = dyn Stream<Item = Result<(quinn::SendStream, quinn::RecvStream), iroh::endpoint::ConnectionError>>
    + Send;
// In-flight decode of a unidirectional stream header.
type PendingUni = dyn Future<Output = Result<(StreamUni, quinn::RecvStream), SessionError>> + Send;
// In-flight decode of a bidirectional stream header; `None` means the stream
// is not a WebTransport stream and should be ignored.
type PendingBi = dyn Future<Output = Result<Option<(quinn::SendStream, quinn::RecvStream)>, SessionError>>
    + Send;
// Logic just for accepting streams, which is annoying because of the stream header.
//
// Incoming H3 streams start with a type/session-ID header that must be read
// (and validated) before the stream can be handed to the application.
pub struct H3SessionAccept {
    // The session ID every incoming stream/datagram must carry.
    session_id: VarInt,
    // We also need to keep a reference to the qpack streams if the endpoint (incorrectly) creates them.
    // Again, this is just so they don't get closed until we drop the session.
    qpack_encoder: Option<quinn::RecvStream>,
    qpack_decoder: Option<quinn::RecvStream>,
    // Endless streams yielding newly-accepted QUIC streams (see `new`).
    accept_uni: Pin<Box<AcceptUni>>,
    accept_bi: Pin<Box<AcceptBi>>,
    // Keep track of work being done to read/write the WebTransport stream header.
    pending_uni: FuturesUnordered<Pin<Box<PendingUni>>>,
    pending_bi: FuturesUnordered<Pin<Box<PendingBi>>>,
}
impl H3SessionAccept {
    /// Creates the accept state for the session identified by `session_id`.
    pub(crate) fn new(conn: Connection, session_id: VarInt) -> Self {
        // Create a stream that just outputs new streams, so it's easy to call from poll.
        let accept_uni = Box::pin(n0_future::stream::unfold(conn.clone(), |conn| async {
            Some((conn.accept_uni().await, conn))
        }));
        let accept_bi = Box::pin(n0_future::stream::unfold(conn, |conn| async {
            Some((conn.accept_bi().await, conn))
        }));
        Self {
            session_id,
            qpack_decoder: None,
            qpack_encoder: None,
            accept_uni,
            accept_bi,
            pending_uni: FuturesUnordered::new(),
            pending_bi: FuturesUnordered::new(),
        }
    }
    // This is poll-based because we accept and decode streams in parallel.
    // In async land I would use tokio::JoinSet, but that requires a runtime.
    // It's better to use FuturesUnordered instead because it's agnostic.
    //
    // Polls for the next incoming unidirectional WebTransport stream:
    // newly-accepted streams are pushed into `pending_uni` for header
    // decoding, and decoded streams are dispatched by type.
    pub fn poll_accept_uni(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Result<RecvStream, SessionError>> {
        loop {
            // Accept any new streams.
            if let Poll::Ready(Some(res)) = self.accept_uni.poll_next(cx) {
                // Start decoding the header and add the future to the list of pending streams.
                let recv = res?;
                let pending = Self::decode_uni(recv, self.session_id);
                self.pending_uni.push(Box::pin(pending));
                continue;
            }
            // Poll the list of pending streams.
            // NOTE: when `pending_uni` is empty this returns Ready(None) and we
            // return Pending; the accept stream above already registered the
            // waker, so we will be woken when a new stream arrives.
            let (typ, recv) = match ready!(self.pending_uni.poll_next(cx)) {
                Some(Ok(res)) => res,
                Some(Err(err)) => {
                    // Ignore the error, the stream was probably reset early.
                    tracing::warn!("failed to decode unidirectional stream: {err:?}");
                    continue;
                }
                None => return Poll::Pending,
            };
            // Decide if we keep looping based on the type.
            match typ {
                StreamUni::WEBTRANSPORT => {
                    // Header already consumed in decode_uni; hand out the rest.
                    let recv = RecvStream::new(recv);
                    return Poll::Ready(Ok(recv));
                }
                StreamUni::QPACK_DECODER => {
                    // Hold on to the stream so it isn't closed; see struct docs.
                    self.qpack_decoder = Some(recv);
                }
                StreamUni::QPACK_ENCODER => {
                    self.qpack_encoder = Some(recv);
                }
                _ => {
                    // ignore unknown streams
                    tracing::debug!("ignoring unknown unidirectional stream: {typ:?}");
                }
            }
        }
    }
    // Reads the stream header, returning the stream type.
    //
    // For WEBTRANSPORT streams the session ID is also read and validated
    // against `expected_session`.
    async fn decode_uni(
        mut recv: quinn::RecvStream,
        expected_session: VarInt,
    ) -> Result<(StreamUni, quinn::RecvStream), SessionError> {
        // Read the VarInt at the start of the stream.
        let typ = VarInt::read(&mut recv)
            .await
            .map_err(|_| WebTransportError::UnknownSession)?;
        let typ = StreamUni(typ);
        if typ == StreamUni::WEBTRANSPORT {
            // Read the session_id and validate it
            let session_id = VarInt::read(&mut recv)
                .await
                .map_err(|_| WebTransportError::UnknownSession)?;
            if session_id != expected_session {
                return Err(WebTransportError::UnknownSession.into());
            }
        }
        // We need to keep a reference to the qpack streams if the endpoint (incorrectly) creates them, so return everything.
        Ok((typ, recv))
    }
    // Polls for the next incoming bidirectional WebTransport stream.
    // Same structure as `poll_accept_uni`: accept, decode header, dispatch.
    pub fn poll_accept_bi(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(SendStream, RecvStream), SessionError>> {
        loop {
            // Accept any new streams.
            if let Poll::Ready(Some(res)) = self.accept_bi.poll_next(cx) {
                // Start decoding the header and add the future to the list of pending streams.
                let (send, recv) = res?;
                let pending = Self::decode_bi(send, recv, self.session_id);
                self.pending_bi.push(Box::pin(pending));
                continue;
            }
            // Poll the list of pending streams.
            // NOTE: as in poll_accept_uni, returning Pending on an empty set is
            // fine because the accept stream above registered the waker.
            let res = match ready!(self.pending_bi.poll_next(cx)) {
                Some(Ok(res)) => res,
                Some(Err(err)) => {
                    // Ignore the error, the stream was probably reset early.
                    tracing::warn!("failed to decode bidirectional stream: {err:?}");
                    continue;
                }
                None => return Poll::Pending,
            };
            if let Some((send, recv)) = res {
                // Wrap the streams in our own types for correct error codes.
                let send = SendStream::new(send);
                let recv = RecvStream::new(recv);
                return Poll::Ready(Ok((send, recv)));
            }
            // Keep looping if it's a stream we want to ignore.
        }
    }
    // Reads the stream header, returning Some if it's a WebTransport stream.
    //
    // Non-WebTransport bidirectional streams yield `Ok(None)` and are ignored
    // by the caller; a wrong session ID is an error.
    async fn decode_bi(
        send: quinn::SendStream,
        mut recv: quinn::RecvStream,
        expected_session: VarInt,
    ) -> Result<Option<(quinn::SendStream, quinn::RecvStream)>, SessionError> {
        let typ = VarInt::read(&mut recv)
            .await
            .map_err(|_| WebTransportError::UnknownSession)?;
        if Frame(typ) != Frame::WEBTRANSPORT {
            tracing::debug!("ignoring unknown bidirectional stream: {typ:?}");
            return Ok(None);
        }
        // Read the session ID and validate it.
        let session_id = VarInt::read(&mut recv)
            .await
            .map_err(|_| WebTransportError::UnknownSession)?;
        if session_id != expected_session {
            return Err(WebTransportError::UnknownSession.into());
        }
        Ok(Some((send, recv)))
    }
}
// Implement the generic WebTransport trait by delegating to the inherent
// methods on `Session`; only `close` differs in taking a `&str` reason.
impl web_transport_trait::Session for Session {
    type SendStream = SendStream;
    type RecvStream = RecvStream;
    type Error = SessionError;
    async fn accept_uni(&self) -> Result<Self::RecvStream, Self::Error> {
        Self::accept_uni(self).await
    }
    async fn accept_bi(&self) -> Result<(Self::SendStream, Self::RecvStream), Self::Error> {
        Self::accept_bi(self).await
    }
    async fn open_bi(&self) -> Result<(Self::SendStream, Self::RecvStream), Self::Error> {
        Self::open_bi(self).await
    }
    async fn open_uni(&self) -> Result<Self::SendStream, Self::Error> {
        Self::open_uni(self).await
    }
    fn close(&self, code: u32, reason: &str) {
        // The trait passes the reason as a string; our inherent method takes bytes.
        Self::close(self, code, reason.as_bytes());
    }
    async fn closed(&self) -> Self::Error {
        Self::closed(self).await
    }
    fn send_datagram(&self, data: Bytes) -> Result<(), Self::Error> {
        Self::send_datagram(self, data)
    }
    async fn recv_datagram(&self) -> Result<Bytes, Self::Error> {
        Self::read_datagram(self).await
    }
    fn max_datagram_size(&self) -> usize {
        Self::max_datagram_size(self)
    }
}

View file

@ -0,0 +1,69 @@
use thiserror::Error;
use tokio::try_join;
/// Errors that can occur while exchanging HTTP/3 SETTINGS frames.
#[derive(Error, Debug, Clone)]
pub enum SettingsError {
    /// The peer closed its control stream before a full SETTINGS frame arrived.
    #[error("quic stream was closed early")]
    UnexpectedEnd,
    /// The SETTINGS frame could not be parsed.
    #[error("protocol error: {0}")]
    ProtoError(#[from] web_transport_proto::SettingsError),
    /// The peer's SETTINGS frame did not advertise WebTransport support.
    #[error("WebTransport is not supported")]
    WebTransportUnsupported,
    /// The QUIC connection failed.
    #[error("connection error")]
    ConnectionError(#[from] iroh::endpoint::ConnectionError),
    /// Reading the peer's control stream failed.
    #[error("read error")]
    ReadError(#[from] quinn::ReadError),
    /// Writing our control stream failed.
    #[error("write error")]
    WriteError(#[from] quinn::WriteError),
}
/// The result of the HTTP/3 SETTINGS exchange.
///
/// Exists mainly to keep both control streams alive: dropping this struct
/// is what closes them.
pub struct Settings {
    // A reference to the send/recv stream, so we don't close it until dropped.
    #[allow(dead_code)]
    send: quinn::SendStream,
    #[allow(dead_code)]
    recv: quinn::RecvStream,
}
impl Settings {
    // Establish the H3 connection by exchanging SETTINGS frames.
    //
    // Opens our control stream and accepts the peer's concurrently,
    // failing fast if either side errors.
    pub async fn connect(conn: &iroh::endpoint::Connection) -> Result<Self, SettingsError> {
        let (send, recv) = try_join!(Self::open(conn), Self::accept(conn))?;
        Ok(Self { send, recv })
    }
    // Accepts the peer's control stream and validates its SETTINGS frame.
    async fn accept(conn: &iroh::endpoint::Connection) -> Result<quinn::RecvStream, SettingsError> {
        let mut recv = conn.accept_uni().await?;
        let settings = web_transport_proto::Settings::read(&mut recv).await?;
        tracing::debug!("received SETTINGS frame: {settings:?}");
        // A value of 0 means the peer did not advertise WebTransport support.
        match settings.supports_webtransport() {
            0 => Err(SettingsError::WebTransportUnsupported),
            _ => Ok(recv),
        }
    }
    // Opens our control stream and sends a SETTINGS frame enabling WebTransport.
    async fn open(conn: &iroh::endpoint::Connection) -> Result<quinn::SendStream, SettingsError> {
        let mut send = conn.open_uni().await?;
        let mut settings = web_transport_proto::Settings::default();
        settings.enable_webtransport(1);
        tracing::debug!("sending SETTINGS frame: {settings:?}");
        settings.write(&mut send).await?;
        Ok(send)
    }
}

View file

@ -0,0 +1,128 @@
use iroh::{Endpoint, endpoint::ConnectionError};
use n0_tracing_test::traced_test;
use tracing::Instrument;
use url::Url;
use crate::{ALPN_H3, Client, H3Request, QuicRequest, SessionError};
// Smoke test for a full WebTransport-over-H3 handshake between two in-process
// endpoints: connect, send one uni stream, then close with an application code
// and verify the code round-trips through the HTTP/3 error mapping.
#[tokio::test]
#[traced_test]
async fn h3_smoke() -> n0_error::Result<()> {
    let client = Endpoint::bind()
        .instrument(tracing::error_span!("client-ep"))
        .await
        .unwrap();
    let client_id = client.id();
    let client = Client::new(client);
    let server = Endpoint::builder()
        .alpns(vec![ALPN_H3.as_bytes().to_vec()])
        .bind()
        .instrument(tracing::error_span!("server-ep"))
        .await
        .unwrap();
    let server_id = server.id();
    let server_addr = server.addr();
    let url: Url = format!("https://{}/foo", server_id).parse().unwrap();
    let client_task = tokio::task::spawn({
        let url = url.clone();
        async move {
            let session = client.connect_h3(server_addr, url.clone()).await.inspect_err(|err| println!("{err:#?}")).unwrap();
            assert_eq!(session.remote_id(), server_id);
            assert_eq!(session.url(), Some(&url));
            // Send a tiny payload on a fresh unidirectional stream.
            let mut stream = session.open_uni().await.unwrap();
            stream.write_all(b"hi").await.unwrap();
            stream.finish().unwrap();
            // The server closes with code 23; check it maps back from the
            // HTTP/3 error-code space to the application code.
            let reason = session.closed().await;
            assert!(
                matches!(reason, SessionError::ConnectionError(ConnectionError::ApplicationClosed(frame)) if web_transport_proto::error_from_http3(frame.error_code.into_inner()) == Some(23))
            );
            drop(session);
            client.close().await;
        }.instrument(tracing::error_span!("client"))
    });
    let server_task = tokio::task::spawn(
        async move {
            let conn = server.accept().await.unwrap().await.unwrap();
            assert_eq!(conn.alpn(), ALPN_H3.as_bytes());
            let request = H3Request::accept(conn)
                .await
                .inspect_err(|err| tracing::error!("accept failed: {err:?}"))
                .unwrap();
            assert_eq!(request.url(), &url);
            assert_eq!(request.conn().remote_id(), client_id);
            // Accept the CONNECT request to establish the session.
            let session = request.ok().await.unwrap();
            assert_eq!(session.url(), Some(&url));
            assert_eq!(session.conn().remote_id(), client_id);
            // Receive the client's payload, then close with application code 23.
            let mut stream = session.accept_uni().await.unwrap();
            let buf = stream.read_to_end(2).await.unwrap();
            assert_eq!(buf, b"hi");
            session.close(23, b"bye");
            server.close().await;
        }
        .instrument(tracing::error_span!("server")),
    );
    client_task.await.unwrap();
    server_task.await.unwrap();
    Ok(())
}
// Smoke test for a raw QUIC session (no H3 handshake): connect with a custom
// ALPN, verify there is no URL, and check the close code passes through
// unmapped (unlike the H3 case, which translates error codes).
#[tokio::test]
#[traced_test]
async fn quic_smoke() -> n0_error::Result<()> {
    const ALPN: &str = "moql";
    let client = Endpoint::bind().await.unwrap();
    let client_id = client.id();
    let client = Client::new(client);
    let server = Endpoint::builder()
        .alpns(vec![ALPN.as_bytes().to_vec()])
        .bind()
        .await
        .unwrap();
    let server_id = server.id();
    let server_addr = server.addr();
    let client_task = tokio::task::spawn({
        async move {
            let session = client
                .connect_quic(server_addr, ALPN.as_bytes())
                .await
                .unwrap();
            println!("session established");
            assert_eq!(session.remote_id(), server_id);
            // Raw QUIC sessions carry no URL.
            assert_eq!(session.url(), None);
            // The close code is used verbatim, with no HTTP/3 mapping.
            let reason = session.closed().await;
            assert!(
                matches!(reason, SessionError::ConnectionError(ConnectionError::ApplicationClosed(frame)) if frame.error_code.into_inner() == 23)
            )
        }.instrument(tracing::error_span!("client"))
    });
    let server_task = tokio::task::spawn({
        async move {
            let conn = server.accept().await.unwrap().await.unwrap();
            assert_eq!(conn.alpn(), ALPN.as_bytes());
            // Raw QUIC accept is synchronous: no CONNECT request to await.
            let request = QuicRequest::accept(conn);
            assert_eq!(request.conn().remote_id(), client_id);
            let session = request.ok();
            assert_eq!(session.url(), None);
            assert_eq!(session.conn().remote_id(), client_id);
            session.close(23, b"bye");
        }
        .instrument(tracing::error_span!("server"))
    });
    client_task.await.unwrap();
    server_task.await.unwrap();
    Ok(())
}

View file

@ -0,0 +1,3 @@
[target.wasm32-unknown-unknown]
runner = "wasm-bindgen-test-runner"
rustflags = ['--cfg', 'getrandom_backend="wasm_js"']

View file

@ -0,0 +1,10 @@
[test-groups]
run-in-isolation = { max-threads = 32 }
# These are tests that must not run concurrently with other tests. All tests in
# this group share a budget of at most 32 threads among them, and each one
# requires all 32, so the effect is that these tests run in isolation.
[[profile.ci.overrides]]
filter = 'test(::run_in_isolation::)'
test-group = 'run-in-isolation'
threads-required = 32

View file

@ -0,0 +1,13 @@
# Keep GitHub Actions up to date with GitHub's Dependabot...
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
version: 2
updates:
- package-ecosystem: github-actions
directory: /
groups:
github-actions:
patterns:
- "*" # Group all Actions updates into a single larger pull request
schedule:
interval: weekly

View file

@ -0,0 +1,18 @@
## Description
<!-- A summary of what this pull request achieves and a rough list of changes. -->
## Breaking Changes
<!-- Optional, if there are any breaking changes document them, including how to migrate older code. -->
## Notes & open questions
<!-- Any notes, remarks or open questions you have to make about the PR. -->
## Change checklist
- [ ] Self-review.
- [ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant.
- [ ] Tests if relevant.
- [ ] All breaking changes documented.

View file

@ -0,0 +1,43 @@
# Run tests using the beta Rust compiler
name: Beta Rust
on:
schedule:
# 06:50 UTC every Monday
- cron: '50 6 * * 1'
workflow_dispatch:
concurrency:
group: beta-${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
IROH_FORCE_STAGING_RELAYS: "1"
jobs:
tests:
uses: './.github/workflows/tests.yaml'
with:
rust-version: beta
notify:
needs: tests
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- name: Extract test results
run: |
printf '${{ toJSON(needs) }}\n'
result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result)
echo TESTS_RESULT=$result
echo "TESTS_RESULT=$result" >>"$GITHUB_ENV"
- name: Notify discord on failure
uses: n0-computer/discord-webhook-notify@v1
if: ${{ env.TESTS_RESULT == 'failure' }}
with:
severity: error
details: |
Rustc beta tests failed in **${{ github.repository }}**
See https://github.com/${{ github.repository }}/actions/workflows/beta.yaml
webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }}

View file

@ -0,0 +1,305 @@
name: CI
on:
pull_request:
types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ]
merge_group:
push:
branches:
- main
concurrency:
group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
RUST_BACKTRACE: 1
RUSTFLAGS: -Dwarnings
RUSTDOCFLAGS: -Dwarnings
MSRV: "1.89"
SCCACHE_CACHE_SIZE: "50G"
IROH_FORCE_STAGING_RELAYS: "1"
jobs:
tests:
name: CI Test Suite
if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
uses: './.github/workflows/tests.yaml'
cross_build:
name: Cross Build Only
if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
timeout-minutes: 30
runs-on: [self-hosted, linux, X64]
strategy:
fail-fast: false
matrix:
target:
# cross tests are currently broken for armv7 and aarch64
# see https://github.com/cross-rs/cross/issues/1311
# - armv7-linux-androideabi
# - aarch64-linux-android
# Freebsd execution fails in cross
# - i686-unknown-freebsd # Linking fails :/
- x86_64-unknown-freebsd
# Netbsd execution fails to link in cross
# - x86_64-unknown-netbsd
steps:
- name: Checkout
uses: actions/checkout@v5
with:
submodules: recursive
- name: Install rust stable
uses: dtolnay/rust-toolchain@stable
- name: Cleanup Docker
continue-on-error: true
run: |
docker kill $(docker ps -q)
# See https://github.com/cross-rs/cross/issues/1222
- uses: taiki-e/install-action@cross
- name: build
# cross tests are currently broken for armv7 and aarch64
# see https://github.com/cross-rs/cross/issues/1311. So on
# those platforms we only build but do not run tests.
run: cross build --all --target ${{ matrix.target }}
env:
RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
android_build:
name: Android Build Only
if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
timeout-minutes: 30
# runs-on: ubuntu-latest
runs-on: [self-hosted, linux, X64]
strategy:
fail-fast: false
matrix:
target:
- aarch64-linux-android
- armv7-linux-androideabi
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Set up Rust
uses: dtolnay/rust-toolchain@stable
with:
target: ${{ matrix.target }}
- name: Install rustup target
run: rustup target add ${{ matrix.target }}
- name: Setup Java
uses: actions/setup-java@v5
with:
distribution: 'temurin'
java-version: '17'
- name: Setup Android SDK
uses: android-actions/setup-android@v3
- name: Setup Android NDK
uses: arqu/setup-ndk@main
id: setup-ndk
with:
ndk-version: r23
add-to-path: true
- name: Build
env:
ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }}
run: |
cargo install --version 3.5.4 cargo-ndk
cargo ndk --target ${{ matrix.target }} build
cross_test:
name: Cross Test
if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
timeout-minutes: 30
runs-on: [self-hosted, linux, X64]
strategy:
fail-fast: false
matrix:
target:
- i686-unknown-linux-gnu
steps:
- name: Checkout
uses: actions/checkout@v5
with:
submodules: recursive
- name: Install rust stable
uses: dtolnay/rust-toolchain@stable
- name: Cleanup Docker
continue-on-error: true
run: |
docker kill $(docker ps -q)
# See https://github.com/cross-rs/cross/issues/1222
- uses: taiki-e/install-action@cross
- name: test
run: cross test --all --target ${{ matrix.target }} -- --test-threads=12
env:
RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }}
wasm_build:
name: Build wasm32
runs-on: ubuntu-latest
env:
RUSTFLAGS: '--cfg getrandom_backend="wasm_js"'
steps:
- name: Checkout sources
uses: actions/checkout@v5
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
- name: Add wasm target
run: rustup target add wasm32-unknown-unknown
- name: Install wasm-tools
uses: bytecodealliance/actions/wasm-tools/setup@v1
- name: wasm32 build
run: cargo build --target wasm32-unknown-unknown
# If the Wasm file contains any 'import "env"' declarations, then
# some non-Wasm-compatible code made it into the final code.
- name: Ensure no 'import "env"' in iroh-gossip Wasm
run: |
! wasm-tools print --skeleton target/wasm32-unknown-unknown/debug/iroh_gossip.wasm | grep 'import "env"'
check_semver:
runs-on: ubuntu-latest
env:
RUSTC_WRAPPER: "sccache"
SCCACHE_GHA_ENABLED: "on"
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
- name: Setup Environment (PR)
if: ${{ github.event_name == 'pull_request' }}
shell: bash
run: |
echo "HEAD_COMMIT_SHA=$(git rev-parse origin/${{ github.base_ref }})" >> ${GITHUB_ENV}
- name: Setup Environment (Push)
if: ${{ github.event_name == 'push' || github.event_name == 'merge_group' }}
shell: bash
run: |
echo "HEAD_COMMIT_SHA=$(git rev-parse origin/main)" >> ${GITHUB_ENV}
- name: Check semver
# uses: obi1kenobi/cargo-semver-checks-action@v2
uses: n0-computer/cargo-semver-checks-action@feat-baseline
with:
package: iroh-gossip
baseline-rev: ${{ env.HEAD_COMMIT_SHA }}
use-cache: false
check_fmt:
timeout-minutes: 30
name: Checking fmt
runs-on: ubuntu-latest
env:
RUSTC_WRAPPER: "sccache"
SCCACHE_GHA_ENABLED: "on"
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: taiki-e/install-action@cargo-make
- run: cargo make format-check
check_docs:
timeout-minutes: 30
name: Checking docs
runs-on: ubuntu-latest
env:
RUSTC_WRAPPER: "sccache"
SCCACHE_GHA_ENABLED: "on"
RUSTDOCFLAGS: -Dwarnings
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@nightly
- uses: dtolnay/install@cargo-docs-rs
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
- name: iroh-gossip docs
run: cargo docs-rs
clippy_check:
timeout-minutes: 30
runs-on: ubuntu-latest
env:
RUSTC_WRAPPER: "sccache"
SCCACHE_GHA_ENABLED: "on"
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@stable
with:
components: clippy
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
# TODO: We have a bunch of platform-dependent code so should
# probably run this job on the full platform matrix
- name: clippy check (all features)
run: cargo clippy --workspace --all-features --all-targets --bins --tests --benches
- name: clippy check (no features)
run: cargo clippy --workspace --no-default-features --lib --bins --tests
- name: clippy check (default features)
run: cargo clippy --workspace --all-targets
msrv:
if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
timeout-minutes: 30
name: Minimal Supported Rust Version
runs-on: ubuntu-latest
env:
RUSTC_WRAPPER: "sccache"
SCCACHE_GHA_ENABLED: "on"
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.MSRV }}
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
- name: Check MSRV all features
run: |
cargo +$MSRV check --workspace --all-targets
cargo_deny:
timeout-minutes: 30
name: cargo deny
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: EmbarkStudios/cargo-deny-action@v2
with:
arguments: --workspace --all-features
command: check
command-arguments: "-Dwarnings"
codespell:
timeout-minutes: 30
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- run: pip install --user codespell[toml]
- run: codespell --ignore-words-list=ans,atmost,crate,inout,ratatui,ser,stayin,swarmin,worl --skip=CHANGELOG.md

View file

@ -0,0 +1,45 @@
# Periodically clean up old generated docs previews
name: Cleanup
on:
schedule:
# 06:50 UTC every Monday
- cron: '50 6 * * 1'
workflow_dispatch:
concurrency:
group: beta-${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
IROH_FORCE_STAGING_RELAYS: "1"
jobs:
clean_docs_branch:
permissions:
issues: write
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
ref: generated-docs-preview
- name: Clean docs branch
run: |
cd pr/
# keep the last 25 prs
dirs=$(ls -1d [0-9]* | sort -n)
total_dirs=$(echo "$dirs" | wc -l)
dirs_to_remove=$(echo "$dirs" | head -n $(($total_dirs - 25)))
if [ -n "$dirs_to_remove" ]; then
echo "$dirs_to_remove" | xargs rm -rf
fi
git add .
git commit -m "Cleanup old docs"
git push

View file

@ -0,0 +1,19 @@
name: Commits
on:
pull_request:
branches: [main]
types: [opened, edited, synchronize]
env:
IROH_FORCE_STAGING_RELAYS: "1"
jobs:
check-for-cc:
runs-on: ubuntu-latest
steps:
- name: check-for-cc
id: check-for-cc
uses: agenthunt/conventional-commit-checker-action@v2.0.0
with:
pr-title-regex: "^(.+)(?:(([^)s]+)))?!?: (.+)"

View file

@ -0,0 +1,73 @@
name: Docs Preview
on:
pull_request:
workflow_dispatch:
inputs:
pr_number:
required: true
type: string
# ensure job runs sequentially so pushing to the preview branch doesn't conflict
concurrency:
group: ci-docs-preview
env:
IROH_FORCE_STAGING_RELAYS: "1"
jobs:
preview_docs:
permissions: write-all
timeout-minutes: 30
name: Docs preview
if: ${{ (github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' ) && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
env:
RUSTC_WRAPPER: "sccache"
SCCACHE_GHA_ENABLED: "on"
SCCACHE_CACHE_SIZE: "50G"
PREVIEW_PATH: pr/${{ github.event.pull_request.number || inputs.pr_number }}/docs
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-10-09
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
- name: Generate Docs
run: cargo doc --workspace --all-features --no-deps
env:
RUSTDOCFLAGS: --cfg iroh_docsrs
- name: Deploy Docs to Preview Branch
uses: peaceiris/actions-gh-pages@v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./target/doc/
destination_dir: ${{ env.PREVIEW_PATH }}
publish_branch: generated-docs-preview
- name: Find Docs Comment
uses: peter-evans/find-comment@v4
id: fc
with:
issue-number: ${{ github.event.pull_request.number || inputs.pr_number }}
comment-author: 'github-actions[bot]'
body-includes: Documentation for this PR has been generated
- name: Get current timestamp
id: get_timestamp
run: echo "TIMESTAMP=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
- name: Create or Update Docs Comment
uses: peter-evans/create-or-update-comment@v5
with:
issue-number: ${{ github.event.pull_request.number || inputs.pr_number }}
comment-id: ${{ steps.fc.outputs.comment-id }}
body: |
Documentation for this PR has been generated and is available at: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/${{ env.PREVIEW_PATH }}/iroh_gossip/
Last updated: ${{ env.TIMESTAMP }}
edit-mode: replace

View file

@ -0,0 +1,99 @@
# Run all tests, including flaky test.
#
# The default CI workflow ignores flaky tests. This workflow will run
# all tests, including ignored ones.
#
# To use this workflow you can either:
#
# - Label a PR with "flaky-test", the normal CI workflow will not run
# any jobs but the jobs here will be run. Note that to merge the PR
# you'll need to remove the label eventually because the normal CI
# jobs are required by branch protection.
#
# - Manually trigger the workflow, you may choose a branch for this to
# run on.
#
# Additionally this jobs runs once a day on a schedule.
#
# Currently doctests are not run by this workflow.
name: Flaky CI
on:
pull_request:
types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ]
schedule:
# 06:30 UTC every day
- cron: '30 6 * * *'
workflow_dispatch:
inputs:
branch:
description: 'Branch to run on, defaults to main'
required: true
default: 'main'
type: string
concurrency:
group: flaky-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
IROH_FORCE_STAGING_RELAYS: "1"
jobs:
tests:
if: "contains(github.event.pull_request.labels.*.name, 'flaky-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule'"
uses: './.github/workflows/tests.yaml'
with:
flaky: true
git-ref: ${{ inputs.branch }}
notify:
needs: tests
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- name: Extract test results
run: |
printf '${{ toJSON(needs) }}\n'
result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result)
echo TESTS_RESULT=$result
echo "TESTS_RESULT=$result" >>"$GITHUB_ENV"
- name: download nextest reports
uses: actions/download-artifact@v5
with:
pattern: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-*
merge-multiple: true
path: nextest-results
- name: create summary report
id: make_summary
run: |
# prevent the glob expression in the loop to match on itself when the dir is empty
shopt -s nullglob
# to deal with multiline outputs it's recommended to use a random EOF, the syntax is based on
# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
EOF=aP51VriWCxNJ1JjvmO9i
echo "summary<<$EOF" >> $GITHUB_OUTPUT
echo "Flaky tests failure:" >> $GITHUB_OUTPUT
echo " " >> $GITHUB_OUTPUT
for report in nextest-results/*.json; do
# remove the name prefix and extension, and split the parts
name=$(echo ${report:16:-5} | tr _ ' ')
echo $name
echo "- **$name**" >> $GITHUB_OUTPUT
# select the failed tests
# the tests have this format "crate::module$test_name", the sed expressions remove the quotes and replace $ for ::
failure=$(jq --slurp '.[] | select(.["type"] == "test" and .["event"] == "failed" ) | .["name"]' $report | sed -e 's/^"//g' -e 's/\$/::/' -e 's/"//')
echo "$failure"
echo "$failure" >> $GITHUB_OUTPUT
done
echo "" >> $GITHUB_OUTPUT
echo "See https://github.com/${{ github.repository }}/actions/workflows/flaky.yaml" >> $GITHUB_OUTPUT
echo "$EOF" >> $GITHUB_OUTPUT
- name: Notify discord on failure
uses: n0-computer/discord-webhook-notify@v1
if: ${{ env.TESTS_RESULT == 'failure' || env.TESTS_RESULT == 'success' }}
with:
text: "Flaky tests in **${{ github.repository }}**:"
severity: ${{ env.TESTS_RESULT == 'failure' && 'warn' || 'info' }}
details: ${{ env.TESTS_RESULT == 'failure' && steps.make_summary.outputs.summary || 'No flaky failures!' }}
webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }}

View file

@ -0,0 +1,50 @@
name: release
on:
push:
tags:
- "v*"
workflow_dispatch:
inputs:
release_version:
description: "Release version"
required: true
default: ""
create_release:
description: "Create release"
required: true
default: "true"
jobs:
create-release:
name: create-release
runs-on: ubuntu-latest
outputs:
upload_url: ${{ steps.release.outputs.upload_url }}
release_version: ${{ env.RELEASE_VERSION }}
steps:
- name: Get the release version from the tag (push)
shell: bash
if: env.RELEASE_VERSION == '' && github.event_name == 'push'
run: |
# See: https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027
echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
echo "version is: ${{ env.RELEASE_VERSION }}"
- name: Get the release version from the tag (dispatch)
shell: bash
if: github.event_name == 'workflow_dispatch'
run: |
echo "RELEASE_VERSION=${{ github.event.inputs.release_version }}" >> $GITHUB_ENV
echo "version is: ${{ env.RELEASE_VERSION }}"
- name: Checkout repository
uses: actions/checkout@v5
with:
fetch-depth: 1
- name: Create GitHub release
id: release
if: github.event.inputs.create_release == 'true' || github.event_name == 'push'
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ env.RELEASE_VERSION }}
release_name: ${{ env.RELEASE_VERSION }}

View file

@ -0,0 +1,53 @@
name: run simulations
on:
pull_request:
jobs:
run_sim:
runs-on: ubuntu-latest
env:
RUSTC_WRAPPER: "sccache"
SCCACHE_GHA_ENABLED: "on"
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Install rust stable
uses: dtolnay/rust-toolchain@stable
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
- name: Run simulations
run: |
git checkout ${{ github.event.pull_request.base.ref }}
cargo run -q --bin sim --features simulator --release -- run -c simulations/all.toml -o /tmp/sim-main
git checkout ${{ github.event.pull_request.head.sha }}
cargo run -q --bin sim --features simulator --release -- run -c simulations/all.toml -o /tmp/sim-pr --baseline /tmp/sim-main |& tee REPORT
echo "<details><summary>Simulation report</summary>" >> COMMENT
echo "" >> COMMENT
echo '```' >> COMMENT
cat REPORT >> COMMENT
echo "" >> COMMENT
echo '```' >> COMMENT
echo "</details>" >> COMMENT
echo "" >> COMMENT
echo "*Last updated: $(date -u +'%Y-%m-%dT%H:%M:%SZ')*" >> COMMENT
- name: Find Docs Comment
uses: peter-evans/find-comment@v4
id: fc
with:
issue-number: ${{ github.event.pull_request.number || inputs.pr_number }}
comment-author: "github-actions[bot]"
body-includes: Simulation report
- name: Create or Update Docs Comment
uses: peter-evans/create-or-update-comment@v5
with:
issue-number: ${{ github.event.pull_request.number || inputs.pr_number }}
comment-id: ${{ steps.fc.outputs.comment-id }}
body-path: COMMENT
edit-mode: replace

View file

@ -0,0 +1,229 @@
# Run all tests, with or without flaky tests.
name: Tests
on:
workflow_call:
inputs:
rust-version:
description: 'The version of the rust compiler to run'
type: string
default: 'stable'
flaky:
description: 'Whether to also run flaky tests'
type: boolean
default: false
git-ref:
description: 'Which git ref to checkout'
type: string
default: ${{ github.ref }}
env:
RUST_BACKTRACE: 1
RUSTFLAGS: -Dwarnings
RUSTDOCFLAGS: -Dwarnings
SCCACHE_CACHE_SIZE: "50G"
CRATES_LIST: "iroh-gossip"
IROH_FORCE_STAGING_RELAYS: "1"
jobs:
build_and_test_nix:
timeout-minutes: 30
name: "Tests"
runs-on: ${{ matrix.runner }}
strategy:
fail-fast: false
matrix:
name: [ubuntu-latest, macOS-arm-latest]
rust: [ '${{ inputs.rust-version }}' ]
features: [all, none, default]
include:
- name: ubuntu-latest
os: ubuntu-latest
release-os: linux
release-arch: amd64
runner: [self-hosted, linux, X64]
- name: macOS-arm-latest
os: macOS-latest
release-os: darwin
release-arch: aarch64
runner: [self-hosted, macOS, ARM64]
env:
# Using self-hosted runners so use local cache for sccache and
# not SCCACHE_GHA_ENABLED.
RUSTC_WRAPPER: "sccache"
steps:
- name: Checkout
uses: actions/checkout@v5
with:
ref: ${{ inputs.git-ref }}
- name: Install ${{ matrix.rust }} rust
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.rust }}
- name: Install cargo-nextest
uses: taiki-e/install-action@v2
with:
tool: nextest@0.9.80
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
- name: Select features
run: |
case "${{ matrix.features }}" in
all)
echo "FEATURES=--all-features" >> "$GITHUB_ENV"
;;
none)
echo "FEATURES=--no-default-features" >> "$GITHUB_ENV"
;;
default)
echo "FEATURES=" >> "$GITHUB_ENV"
;;
*)
exit 1
esac
- name: check features
if: ${{ ! inputs.flaky }}
run: |
for i in ${CRATES_LIST//,/ }
do
echo "Checking $i $FEATURES"
if [ $i = "iroh-cli" ]; then
targets="--bins"
else
targets="--lib --bins"
fi
echo cargo check -p $i $FEATURES $targets
cargo check -p $i $FEATURES $targets
done
env:
RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
- name: build tests
run: |
cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --no-run
- name: list ignored tests
run: |
cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --run-ignored ignored-only
- name: run tests
run: |
mkdir -p output
cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
env:
RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1
- name: upload results
if: ${{ failure() && inputs.flaky }}
uses: actions/upload-artifact@v4
with:
name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
path: output
retention-days: 45
compression-level: 0
- name: doctests
if: ${{ (! inputs.flaky) && matrix.features == 'all' }}
run: |
if [ -n "${{ runner.debug }}" ]; then
export RUST_LOG=TRACE
else
export RUST_LOG=DEBUG
fi
cargo test --workspace --all-features --doc
build_and_test_windows:
timeout-minutes: 30
name: "Tests"
runs-on: ${{ matrix.runner }}
strategy:
fail-fast: false
matrix:
name: [windows-latest]
        rust: [ '${{ inputs.rust-version }}' ]
features: [all, none, default]
target:
- x86_64-pc-windows-msvc
include:
- name: windows-latest
os: windows
runner: [self-hosted, windows, x64]
env:
# Using self-hosted runners so use local cache for sccache and
# not SCCACHE_GHA_ENABLED.
RUSTC_WRAPPER: "sccache"
steps:
- name: Checkout
uses: actions/checkout@v5
with:
ref: ${{ inputs.git-ref }}
- name: Install ${{ matrix.rust }}
run: |
rustup toolchain install ${{ matrix.rust }}
          rustup default ${{ matrix.rust }}
rustup target add ${{ matrix.target }}
rustup set default-host ${{ matrix.target }}
- name: Install cargo-nextest
shell: powershell
run: |
$tmp = New-TemporaryFile | Rename-Item -NewName { $_ -replace 'tmp$', 'zip' } -PassThru
Invoke-WebRequest -OutFile $tmp https://get.nexte.st/latest/windows
$outputDir = if ($Env:CARGO_HOME) { Join-Path $Env:CARGO_HOME "bin" } else { "~/.cargo/bin" }
$tmp | Expand-Archive -DestinationPath $outputDir -Force
$tmp | Remove-Item
- name: Select features
run: |
switch ("${{ matrix.features }}") {
"all" {
echo "FEATURES=--all-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
}
"none" {
echo "FEATURES=--no-default-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
}
"default" {
echo "FEATURES=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
}
default {
Exit 1
}
}
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.9
- uses: msys2/setup-msys2@v2
- name: build tests
run: |
cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --no-run
- name: list ignored tests
run: |
cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --run-ignored ignored-only
- name: tests
run: |
mkdir -p output
cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --target ${{ matrix.target }} --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
env:
RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1
- name: upload results
if: ${{ failure() && inputs.flaky }}
uses: actions/upload-artifact@v4
with:
name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
path: output
retention-days: 1
compression-level: 0

View file

@ -0,0 +1 @@
/target

View file

@ -0,0 +1,250 @@
# Changelog
All notable changes to iroh-gossip will be documented in this file.
## [0.96.0](https://github.com/n0-computer/iroh-gossip/compare/v0.95.0..0.96.0) - 2026-01-29
### ⛰️ Features
- Add `neighbors()` method to `GossipTopic` ([#124](https://github.com/n0-computer/iroh-gossip/issues/124)) - ([9e4ddaa](https://github.com/n0-computer/iroh-gossip/commit/9e4ddaa904c6e1b853081b9f2e4f9628ed274b08))
### 🐛 Bug Fixes
- Keep topic alive if either senders or receivers exist ([#119](https://github.com/n0-computer/iroh-gossip/issues/119)) - ([34b0e0e](https://github.com/n0-computer/iroh-gossip/commit/34b0e0ea87a2f0a6011d026bcffce78697689072))
- Clean-up connections after unexpected disconnects ([#117](https://github.com/n0-computer/iroh-gossip/issues/117)) - ([84f3945](https://github.com/n0-computer/iroh-gossip/commit/84f394577a4e8b52660a83dbbc955f0accb31b5a))
### 🧪 Testing
- Switch from tracing-test to n0-tracing-test ([#125](https://github.com/n0-computer/iroh-gossip/issues/125)) - ([48f522b](https://github.com/n0-computer/iroh-gossip/commit/48f522bb1504a7fbf4eb51112ea724fa1593a35f))
### ⚙️ Miscellaneous Tasks
- Ignore rustls-pemfile unmaintained advisory ([#122](https://github.com/n0-computer/iroh-gossip/issues/122)) - ([16c68bb](https://github.com/n0-computer/iroh-gossip/commit/16c68bb187c7127eb5694bfbbd1ec3da518bcc25))
- Upgrade to `iroh`v0.96 and the latest version of `iroh-quinn` ([#114](https://github.com/n0-computer/iroh-gossip/issues/114)) - ([13ef379](https://github.com/n0-computer/iroh-gossip/commit/13ef379f60e91f7292ae287051245db25ca8dd02))
## [0.95.0](https://github.com/n0-computer/iroh-gossip/compare/v0.94.0..0.95.0) - 2025-11-06
### Deps/refactor
- [**breaking**] Update to iroh main, port to n0-error ([#113](https://github.com/n0-computer/iroh-gossip/issues/113)) - ([4d2cb2f](https://github.com/n0-computer/iroh-gossip/commit/4d2cb2f3891e8dadd89a985fb6b5ad55d92e4c59))
## [0.94.0](https://github.com/n0-computer/iroh-gossip/compare/v0.93.1..0.94.0) - 2025-10-21
### 🚜 Refactor
- Use discovery service instead of `Endpoint::add_node_addr` ([#108](https://github.com/n0-computer/iroh-gossip/issues/108)) - ([f7e3ef4](https://github.com/n0-computer/iroh-gossip/commit/f7e3ef478a1c4f1ea934e29f3436582e68de734c))
### ⚙️ Miscellaneous Tasks
- Upgrade to iroh 0.94 ([#110](https://github.com/n0-computer/iroh-gossip/issues/110)) - ([ad78602](https://github.com/n0-computer/iroh-gossip/commit/ad78602a4bafad8db2a4264bf16fde12b08f7a5e))
## [0.93.1](https://github.com/n0-computer/iroh-gossip/compare/v0.93.0..0.93.1) - 2025-10-11
### ⚙️ Miscellaneous Tasks
- Update nightly version in CI and docs workflows ([#107](https://github.com/n0-computer/iroh-gossip/issues/107)) - ([b5e3414](https://github.com/n0-computer/iroh-gossip/commit/b5e3414f8db03910b6cea691bad69f798f1c34c6))
## [0.93.0](https://github.com/n0-computer/iroh-gossip/compare/v0.92.0..0.93.0) - 2025-10-09
### ⛰️ Features
- *(ci)* Add auto release on tag version push ([#93](https://github.com/n0-computer/iroh-gossip/issues/93)) - ([afa6e1d](https://github.com/n0-computer/iroh-gossip/commit/afa6e1dca9cef061642bece52fcdad5e877496e3))
- Set custom ALPN ([#92](https://github.com/n0-computer/iroh-gossip/issues/92)) - ([ff87b6a](https://github.com/n0-computer/iroh-gossip/commit/ff87b6a380a39c8274376ff26f874824ff80d752))
### ⚡ Performance
- Don't allocate in `Timers::wait_next` ([#102](https://github.com/n0-computer/iroh-gossip/issues/102)) - ([65278b7](https://github.com/n0-computer/iroh-gossip/commit/65278b75aa67cb6bed06c9770cacf701e952c0d3))
### ⚙️ Miscellaneous Tasks
- *(ci)* Fix url of the beta notification ([#94](https://github.com/n0-computer/iroh-gossip/issues/94)) - ([2021566](https://github.com/n0-computer/iroh-gossip/commit/20215660a71f192562477dafd677d5a974c6a7f3))
- Release prep ([#106](https://github.com/n0-computer/iroh-gossip/issues/106)) - ([099196e](https://github.com/n0-computer/iroh-gossip/commit/099196e72d806392ed609dfe36510b130839d52e))
## [0.92.0](https://github.com/n0-computer/iroh-gossip/compare/v0.91.0..0.92.0) - 2025-09-18
### ⚙️ Miscellaneous Tasks
- Upgrade `iroh`, `iroh-base`, `irpc` ([#91](https://github.com/n0-computer/iroh-gossip/issues/91)) - ([464fe69](https://github.com/n0-computer/iroh-gossip/commit/464fe69789ae8c8fefd7734a2f44db5aa447db26))
## [0.91.0](https://github.com/n0-computer/iroh-gossip/compare/v0.90.0..0.91.0) - 2025-07-31
### 🐛 Bug Fixes
- Make GossipSender `Clone` and GossipTopic `Sync` ([#81](https://github.com/n0-computer/iroh-gossip/issues/81)) - ([f215aa1](https://github.com/n0-computer/iroh-gossip/commit/f215aa13806425491cf328e42c611a0002da4371))
### 📚 Documentation
- Replace `iroh-net` mention in README ([#83](https://github.com/n0-computer/iroh-gossip/issues/83)) - ([e3df4ec](https://github.com/n0-computer/iroh-gossip/commit/e3df4ec7a56bcff0dafe6940d7d706ece5508891))
### ⚙️ Miscellaneous Tasks
- Add patch for `iroh` dependencies ([#82](https://github.com/n0-computer/iroh-gossip/issues/82)) - ([2e82a68](https://github.com/n0-computer/iroh-gossip/commit/2e82a683f93aa1ae50929da8ce95b23f85b466f1))
- [**breaking**] Prep for `v0.91.0` release ([#85](https://github.com/n0-computer/iroh-gossip/issues/85)) - ([d1fbfca](https://github.com/n0-computer/iroh-gossip/commit/d1fbfca15f484a41b8d6e8f771d14bf9fe5c7f81))
### Deps
- Update to irpc@main, iroh@main ([#84](https://github.com/n0-computer/iroh-gossip/issues/84)) - ([af7ae1f](https://github.com/n0-computer/iroh-gossip/commit/af7ae1f9bb9fa74aef97d510e062b09c03e96a87))
## [0.90.0](https://github.com/n0-computer/iroh-gossip/compare/v0.35.0..0.90.0) - 2025-06-27
### ⛰️ Features
- *(net)* Add shutdown function ([#69](https://github.com/n0-computer/iroh-gossip/issues/69)) - ([3cf2cd2](https://github.com/n0-computer/iroh-gossip/commit/3cf2cd2f3af5c79832b335b525b34db4290d0332))
### 🐛 Bug Fixes
- *(hyparview)* [**breaking**] Only add peers to active view after receiving neighbor messages ([#56](https://github.com/n0-computer/iroh-gossip/issues/56)) - ([5a441e6](https://github.com/n0-computer/iroh-gossip/commit/5a441e6cf5589fc3c7cf3c290005b1094895038c))
- *(hyparview)* Use shuffle replies as intended ([#57](https://github.com/n0-computer/iroh-gossip/issues/57)) - ([9632ced](https://github.com/n0-computer/iroh-gossip/commit/9632ced028ad7c211ac256b89d7ac0fcb32f55a6))
- *(hyparview)* Don't emit PeerData event for empty PeerData - ([c345f0a](https://github.com/n0-computer/iroh-gossip/commit/c345f0a0a6a099643f13a7a6743b308548a1c40e))
- *(plumtree)* Ensure eager relation is symmetrical - ([0abface](https://github.com/n0-computer/iroh-gossip/commit/0abface77c81dfde91c9f37da1f00bfee36f86d7))
- *(plumtree)* Clear graft timer to allow retry on new ihaves - ([b65cdce](https://github.com/n0-computer/iroh-gossip/commit/b65cdcea36c8b30ac3ab6443645c6e69795f6ebf))
### 🚜 Refactor
- *(hyparview)* Improve disconnect handling - ([5156d00](https://github.com/n0-computer/iroh-gossip/commit/5156d00f72be478872e7cecdf1b95d739b3b4fca))
- *(hyparview)* Remove obsolete parameter in hyparview - ([d954aa6](https://github.com/n0-computer/iroh-gossip/commit/d954aa62272d7f781ce762b42b06d2521e7d1b30))
- *(net)* [**breaking**] Remove `Joined` event, use `NeighborUp` ([#49](https://github.com/n0-computer/iroh-gossip/issues/49)) - ([c06f2ed](https://github.com/n0-computer/iroh-gossip/commit/c06f2ed64cb887d0714dfa1e75c0d66051c9d3e1))
- [**breaking**] Port to irpc, flatten event enum, remove cli impl ([#67](https://github.com/n0-computer/iroh-gossip/issues/67)) - ([a8d5cd2](https://github.com/n0-computer/iroh-gossip/commit/a8d5cd2b4c749993dd99f9d5eead073fd4b2498d))
- [**breaking**] Port to iroh@0.90 and n0-snafu ([#77](https://github.com/n0-computer/iroh-gossip/issues/77)) - ([1523227](https://github.com/n0-computer/iroh-gossip/commit/1523227c980c7d58efff805645aa50bea17402b0))
- [**breaking**] Change wire protocol to use uni streams per topic ([#75](https://github.com/n0-computer/iroh-gossip/issues/75)) - ([db1a135](https://github.com/n0-computer/iroh-gossip/commit/db1a13550d7b014e959fe807b45c3614e26e7105))
### 📚 Documentation
- Deny warnings for docs in CI ([#78](https://github.com/n0-computer/iroh-gossip/issues/78)) - ([b38b38f](https://github.com/n0-computer/iroh-gossip/commit/b38b38fc5970164a3c037b4d6306d8b7aee10f4f))
### 🧪 Testing
- Improve simulator ([#52](https://github.com/n0-computer/iroh-gossip/issues/52)) - ([8c30674](https://github.com/n0-computer/iroh-gossip/commit/8c306742c5823f8a6655252b1dbbbfb021c3400d))
### ⚙️ Miscellaneous Tasks
- Update clippy ([#79](https://github.com/n0-computer/iroh-gossip/issues/79)) - ([07b7b77](https://github.com/n0-computer/iroh-gossip/commit/07b7b77a8ceacad8094ec83209aa3d701a63d5b4))
- Upgrade to `iroh` at `0.90.0` and `irpc` at `0.5.0` ([#80](https://github.com/n0-computer/iroh-gossip/issues/80)) - ([0e613d8](https://github.com/n0-computer/iroh-gossip/commit/0e613d884e95203940d94b3b5363c972f4ef00d1))
### Change
- *(hyparview)* Send a ShuffleReply before disconnecting ([#59](https://github.com/n0-computer/iroh-gossip/issues/59)) - ([fd379fc](https://github.com/n0-computer/iroh-gossip/commit/fd379fc5f32ee52c2c7aad03c03c373c2ac69816))
## [0.35.0](https://github.com/n0-computer/iroh-gossip/compare/v0.34.1..0.35.0) - 2025-05-12
### 🐛 Bug Fixes
- Respect max message size when constructing IHave messages ([#63](https://github.com/n0-computer/iroh-gossip/issues/63)) - ([77c56f1](https://github.com/n0-computer/iroh-gossip/commit/77c56f1a769e561d1c8b91ebed6e02e7792bc2cb))
### 🚜 Refactor
- [**breaking**] Use new iroh-metrics version, no more global tracking ([#58](https://github.com/n0-computer/iroh-gossip/issues/58)) - ([2a37214](https://github.com/n0-computer/iroh-gossip/commit/2a372144b08f6db43f67536e8694659b4b326698))
### ⚙️ Miscellaneous Tasks
- Update dependencies ([#66](https://github.com/n0-computer/iroh-gossip/issues/66)) - ([dbec9b0](https://github.com/n0-computer/iroh-gossip/commit/dbec9b033cded5aa3e09b0c80d52bed697dfe880))
- Update to `iroh` v0.35 ([#68](https://github.com/n0-computer/iroh-gossip/issues/68)) - ([e6af27d](https://github.com/n0-computer/iroh-gossip/commit/e6af27d924db780e00b10017b18d4da3ef8db18a))
## [0.34.1](https://github.com/n0-computer/iroh-gossip/compare/v0.34.0..0.34.1) - 2025-03-24
### 🐛 Bug Fixes
- Allow instant reconnects, and always prefer newest connection ([#43](https://github.com/n0-computer/iroh-gossip/issues/43)) - ([ea1c773](https://github.com/n0-computer/iroh-gossip/commit/ea1c773659f88d7eed776b6b15cc0e559267afea))
## [0.34.0](https://github.com/n0-computer/iroh-gossip/compare/v0.33.0..0.34.0) - 2025-03-18
### 🐛 Bug Fixes
- Repo link for flaky tests ([#38](https://github.com/n0-computer/iroh-gossip/issues/38)) - ([0a03543](https://github.com/n0-computer/iroh-gossip/commit/0a03543db6aaedb7ac403e38360d5a1afc88b3f4))
### ⚙️ Miscellaneous Tasks
- Patch to use main branch of iroh dependencies ([#40](https://github.com/n0-computer/iroh-gossip/issues/40)) - ([d76305d](https://github.com/n0-computer/iroh-gossip/commit/d76305da7d75639638efcd537a1ffb13d07ef1ee))
- Update to latest iroh ([#42](https://github.com/n0-computer/iroh-gossip/issues/42)) - ([129e2e8](https://github.com/n0-computer/iroh-gossip/commit/129e2e80ec7a6efd29606fcdaf0202791a25778f))
## [0.33.0](https://github.com/n0-computer/iroh-gossip/compare/v0.32.0..0.33.0) - 2025-02-25
### ⛰️ Features
- Compile to wasm and run in browsers ([#37](https://github.com/n0-computer/iroh-gossip/issues/37)) - ([8f99f7d](https://github.com/n0-computer/iroh-gossip/commit/8f99f7d85fd8c410512b430a4ee2efd014828550))
### ⚙️ Miscellaneous Tasks
- Patch to use main branch of iroh dependencies ([#36](https://github.com/n0-computer/iroh-gossip/issues/36)) - ([7e16be8](https://github.com/n0-computer/iroh-gossip/commit/7e16be85dbf52af721aa8bb4c68723c029ce4bd2))
- Upgrade to latest `iroh` and `quic-rpc` ([#39](https://github.com/n0-computer/iroh-gossip/issues/39)) - ([a2ef813](https://github.com/n0-computer/iroh-gossip/commit/a2ef813c6033f1683162bb09d50f1f988f774cbe))
## [0.32.0](https://github.com/n0-computer/iroh-gossip/compare/v0.31.0..0.32.0) - 2025-02-04
### ⛰️ Features
- [**breaking**] Use explicit errors ([#34](https://github.com/n0-computer/iroh-gossip/issues/34)) - ([534f010](https://github.com/n0-computer/iroh-gossip/commit/534f01046332a21f6356d189c686f7c6c17af3c2))
### ⚙️ Miscellaneous Tasks
- Pin nextest version ([#29](https://github.com/n0-computer/iroh-gossip/issues/29)) - ([72b32d2](https://github.com/n0-computer/iroh-gossip/commit/72b32d25e8a810011456a2740581b3b3802f1cab))
- Remove individual repo project tracking ([#31](https://github.com/n0-computer/iroh-gossip/issues/31)) - ([8a79db6](https://github.com/n0-computer/iroh-gossip/commit/8a79db65a928ae0610d85301b009d3ec13b0fbe1))
- Update dependencies ([#35](https://github.com/n0-computer/iroh-gossip/issues/35)) - ([3c257a1](https://github.com/n0-computer/iroh-gossip/commit/3c257a1db9ea0ade0c35b060a28b1287321a532a))
## [0.31.0](https://github.com/n0-computer/iroh-gossip/compare/v0.30.1..0.31.0) - 2025-01-14
### ⚙️ Miscellaneous Tasks
- Add project tracking ([#28](https://github.com/n0-computer/iroh-gossip/issues/28)) - ([bf89c85](https://github.com/n0-computer/iroh-gossip/commit/bf89c85c3ffa78fea462d5ad7c7bae10f828d7b0))
- Upgrade to `iroh@v0.31.0` ([#30](https://github.com/n0-computer/iroh-gossip/issues/30)) - ([60f371e](https://github.com/n0-computer/iroh-gossip/commit/60f371ec61992889c390d64611e907a491812b96))
## [0.30.1](https://github.com/n0-computer/iroh-gossip/compare/v0.30.0..0.30.1) - 2024-12-20
### 🐛 Bug Fixes
- Add missing Sync bound to EventStream's inner - ([d7039c4](https://github.com/n0-computer/iroh-gossip/commit/d7039c4684e0072bce1c1fe4bce7d39ba42e8390))
## [0.30.0](https://github.com/n0-computer/iroh-gossip/compare/v0.29.0..0.30.0) - 2024-12-17
### ⛰️ Features
- Remove rpc from default features - ([10e9b68](https://github.com/n0-computer/iroh-gossip/commit/10e9b685f6ede483ace4be4360466a111dfcfec4))
- [**breaking**] Introduce builder pattern to construct Gossip ([#17](https://github.com/n0-computer/iroh-gossip/issues/17)) - ([0e6fd20](https://github.com/n0-computer/iroh-gossip/commit/0e6fd20203c6468af9d783f1e62379eca283188a))
- Update to iroh 0.30 - ([b3a5a33](https://github.com/n0-computer/iroh-gossip/commit/b3a5a33351b57e01cba816826d642f3314f00e7d))
### 🐛 Bug Fixes
- Improve connection handling ([#22](https://github.com/n0-computer/iroh-gossip/issues/22)) - ([61e64c7](https://github.com/n0-computer/iroh-gossip/commit/61e64c79961640cd2aa2412e607035cd7750f824))
- Prevent task leak for rpc handler task ([#20](https://github.com/n0-computer/iroh-gossip/issues/20)) - ([03db85d](https://github.com/n0-computer/iroh-gossip/commit/03db85d218738df7b4c39cc2d178f2f90ba58ea3))
### 🚜 Refactor
- Adapt ProtocolHandler impl ([#16](https://github.com/n0-computer/iroh-gossip/issues/16)) - ([d5285e7](https://github.com/n0-computer/iroh-gossip/commit/d5285e7240da4e233be7c8f83099741f6f272bb0))
- [**breaking**] Align api naming between RPC and direct calls - ([35d73db](https://github.com/n0-computer/iroh-gossip/commit/35d73db8a982d7bbe1eb3cba126ac25422f5c1b6))
- Manually track dials, instead of using `iroh::dialer` ([#21](https://github.com/n0-computer/iroh-gossip/issues/21)) - ([2d90828](https://github.com/n0-computer/iroh-gossip/commit/2d90828a682574e382f5b0fbc43395ff698a63e2))
### 📚 Documentation
- Add "Getting Started" to the README and add the readme to the docs ([#19](https://github.com/n0-computer/iroh-gossip/issues/19)) - ([1625123](https://github.com/n0-computer/iroh-gossip/commit/1625123a89278cb09827abe8e7ee2bf409cf2f20))
## [0.29.0](https://github.com/n0-computer/iroh-gossip/compare/v0.28.1..0.29.0) - 2024-12-04
### ⛰️ Features
- Add cli - ([16f3505](https://github.com/n0-computer/iroh-gossip/commit/16f35050fe47534052e79dcbca42da4212dc6256))
- Update to latest iroh ([#11](https://github.com/n0-computer/iroh-gossip/issues/11)) - ([89e91a3](https://github.com/n0-computer/iroh-gossip/commit/89e91a34bd046fb7fbd504b2b8d0849e2865d410))
- Reexport ALPN at top level - ([7a0ec63](https://github.com/n0-computer/iroh-gossip/commit/7a0ec63a0ab7f14d78c77f8c779b2abef956da40))
- Update to iroh@0.29.0 - ([a28327c](https://github.com/n0-computer/iroh-gossip/commit/a28327ca512407a18a3802800c6712adc33acf84))
### 🚜 Refactor
- Use hex for debugging and display - ([b487112](https://github.com/n0-computer/iroh-gossip/commit/b4871121ed1862da4459353f63415d8ae4b3f8c5))
### ⚙️ Miscellaneous Tasks
- Fixup deny.toml - ([e614d86](https://github.com/n0-computer/iroh-gossip/commit/e614d86c0a690ac4acb6b4ef394a0bf55662dcc7))
- Prune some deps ([#8](https://github.com/n0-computer/iroh-gossip/issues/8)) - ([ba0f6b0](https://github.com/n0-computer/iroh-gossip/commit/ba0f6b0f54a740d8eae7ee6683f4aa1d8d8c8eb2))
- Init changelog - ([3eb675b](https://github.com/n0-computer/iroh-gossip/commit/3eb675b6a1ad51279ce225d0b36ef9957f17aa06))
- Fix changelog generation - ([95a4611](https://github.com/n0-computer/iroh-gossip/commit/95a4611aafee248052d3dc9ef97c9bc8a26d4821))
## [0.28.1](https://github.com/n0-computer/iroh-gossip/compare/v0.28.0..v0.28.1) - 2024-11-04
### 🐛 Bug Fixes
- Update to quic-rpc@0.14 - ([7b73408](https://github.com/n0-computer/iroh-gossip/commit/7b73408e80381b77534ae3721be0421da110de80))
- Use correctly patched iroh-net - ([276e36a](https://github.com/n0-computer/iroh-gossip/commit/276e36aa1caff8d41f89d57d8aef229ffa9924cb))
### ⚙️ Miscellaneous Tasks
- Release iroh-gossip version 0.28.1 - ([efce3e1](https://github.com/n0-computer/iroh-gossip/commit/efce3e1dc991c15a7f1fc6f579f04876a22a7b1e))

4965
third_party/iroh-org/iroh-gossip/Cargo.lock generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,172 @@
[package]
name = "iroh-gossip"
version = "0.96.0"
edition = "2021"
readme = "README.md"
description = "gossip messages over broadcast trees"
license = "MIT OR Apache-2.0"
authors = ["n0 team"]
repository = "https://github.com/n0-computer/iroh-gossip"
resolver = "2"
# Sadly this also needs to be updated in .github/workflows/ci.yml
rust-version = "1.89"
[lib]
crate-type = ["cdylib", "rlib"]
[lints.rust]
missing_debug_implementations = "warn"
# We use this --cfg for documenting the cargo features on which an API
# is available. To preview this locally use: RUSTFLAGS="--cfg
# iroh_docsrs cargo +nightly doc --all-features". We use our own
# iroh_docsrs instead of the common docsrs to avoid also enabling this
# feature in any dependencies, because some indirect dependencies
# require a feature enabled when using `--cfg docsrs` which we can not
# do. To enable for a crate set `#![cfg_attr(iroh_docsrs,
# feature(doc_cfg))]` in the crate.
unexpected_cfgs = { level = "warn", check-cfg = ["cfg(iroh_docsrs)"] }
[lints.clippy]
unused-async = "warn"
[dependencies]
blake3 = "1.8"
bytes = { version = "1.7", features = ["serde"] }
data-encoding = "2.6.0"
derive_more = { version = "2.0.1", features = [
"add",
"debug",
"deref",
"display",
"from",
"try_into",
"into",
] }
ed25519-dalek = { version = "3.0.0-pre.1", features = ["serde", "rand_core"] }
hex = "0.4.3"
indexmap = "2.0"
iroh-metrics = { version = "0.38", default-features = false }
iroh-base = { version = "0.96", default-features = false, features = ["key"] }
n0-future = "0.3"
postcard = { version = "1", default-features = false, features = [
"alloc",
"use-std",
"experimental-derive",
] }
rand = { version = "0.9.2", features = ["std_rng"] }
serde = { version = "1.0.164", features = ["derive"] }
# net dependencies (optional)
futures-lite = { version = "2.3", optional = true }
futures-concurrency = { version = "7.6.1", optional = true }
futures-util = { version = "0.3.30", optional = true }
iroh = { version = "0.96", default-features = false, optional = true }
tokio = { version = "1", optional = true, features = ["io-util", "sync"] }
tokio-util = { version = "0.7.12", optional = true, features = ["codec"] }
tracing = "0.1"
irpc = { version = "0.12.0", optional = true, default-features = false, features = [
"derive",
"stream",
"spans",
] }
n0-error = { version = "0.1", features = ["anyhow"] }
# rpc dependencies (optional)
quinn = { package = "iroh-quinn", version = "0.16.0", optional = true }
# test-utils dependencies (optional)
rand_chacha = { version = "0.9", optional = true }
humantime-serde = { version = "1.1.1", optional = true }
# simulator dependencies (optional)
clap = { version = "4", features = ["derive"], optional = true }
toml = { version = "0.9.8", optional = true }
tracing-subscriber = { version = "0.3", features = [
"env-filter",
], optional = true }
serde_json = { version = "1", optional = true }
rayon = { version = "1.10.0", optional = true }
comfy-table = { version = "7.1.4", optional = true }
[dev-dependencies]
tokio = { version = "1", features = [
"io-util",
"sync",
"rt",
"macros",
"net",
"fs",
] }
clap = { version = "4", features = ["derive"] }
humantime-serde = { version = "1.1.1" }
iroh = { version = "0.96", default-features = false, features = [
"metrics",
"test-utils",
] }
rand_chacha = "0.9"
testresult = "0.4.1"
tracing-subscriber = { version = "0.3.20", features = ["env-filter"] }
n0-tracing-test = "0.3"
url = "2.4.0"
serde-byte-array = "0.1.2"
[features]
default = ["net", "metrics"]
net = [
"dep:irpc",
"dep:futures-lite",
"dep:iroh",
"dep:tokio",
"dep:tokio-util",
"dep:futures-util",
"dep:futures-concurrency",
]
rpc = [
"dep:irpc",
"dep:tokio",
"dep:quinn",
"irpc/rpc",
"irpc/quinn_endpoint_setup",
]
test-utils = ["dep:rand_chacha", "dep:humantime-serde"]
simulator = [
"test-utils",
"dep:tracing-subscriber",
"dep:toml",
"dep:clap",
"dep:serde_json",
"dep:rayon",
"dep:comfy-table",
]
metrics = ["iroh-metrics/metrics"]
examples = ["net"]
[[test]]
name = "sim"
path = "tests/sim.rs"
required-features = ["test-utils"]
[[bin]]
name = "sim"
required-features = ["simulator"]
[[example]]
name = "chat"
required-features = ["examples"]
[[example]]
name = "setup"
required-features = ["examples"]
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "iroh_docsrs"]
[profile.bench]
debug = true
[profile.release]
debug = true

View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [2023] [N0, INC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,25 @@
Copyright 2023 N0, INC.
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -0,0 +1,28 @@
# Use cargo-make to run tasks here: https://crates.io/crates/cargo-make
[tasks.format]
workspace = false
command = "cargo"
args = [
"fmt",
"--all",
"--",
"--config",
"unstable_features=true",
"--config",
"imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true",
]
[tasks.format-check]
workspace = false
command = "cargo"
args = [
"fmt",
"--all",
"--check",
"--",
"--config",
"unstable_features=true",
"--config",
"imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true",
]

View file

@ -0,0 +1,93 @@
# iroh-gossip
This crate implements the `iroh-gossip` protocol.
It is based on *epidemic broadcast trees* to disseminate messages among a swarm of peers interested in a *topic*.
The implementation is based on the papers [HyParView](https://asc.di.fct.unl.pt/~jleitao/pdf/dsn07-leitao.pdf) and [PlumTree](https://asc.di.fct.unl.pt/~jleitao/pdf/srds07-leitao.pdf).
The crate is made up from two modules:
The `proto` module is the protocol implementation, as a state machine without any IO.
The `net` module implements networking logic for running `iroh-gossip` on `iroh` connections.
The `net` module is optional behind the `net` feature flag (enabled by default).
# Getting Started
The `iroh-gossip` protocol was designed to be used in conjunction with `iroh`. [Iroh](https://docs.rs/iroh) is a networking library for making direct connections, these connections are how gossip messages are sent.
Iroh provides a [`Router`](https://docs.rs/iroh/latest/iroh/protocol/struct.Router.html) that takes an [`Endpoint`](https://docs.rs/iroh/latest/iroh/endpoint/struct.Endpoint.html) and any protocols needed for the application. Similar to a router in a webserver library, it runs a loop accepting incoming connections and routes them to the specific protocol handler, based on `ALPN`.
Here is a basic example of how to set up `iroh-gossip` with `iroh`:
```rust,no_run
use iroh::{protocol::Router, Endpoint, EndpointId};
use iroh_gossip::{api::Event, Gossip, TopicId};
use n0_error::{Result, StdResultExt};
use n0_future::StreamExt;
#[tokio::main]
async fn main() -> Result<()> {
// create an iroh endpoint that includes the standard discovery mechanisms
// we've built at number0
let endpoint = Endpoint::bind().await?;
// build gossip protocol
let gossip = Gossip::builder().spawn(endpoint.clone());
// setup router
let router = Router::builder(endpoint)
.accept(iroh_gossip::ALPN, gossip.clone())
.spawn();
// gossip swarms are centered around a shared "topic id", which is a 32 byte identifier
let topic_id = TopicId::from_bytes([23u8; 32]);
// and you need some bootstrap peers to join the swarm
let bootstrap_peers = bootstrap_peers();
// then, you can subscribe to the topic and join your initial peers
let (sender, mut receiver) = gossip
.subscribe(topic_id, bootstrap_peers)
.await?
.split();
// you might want to wait until you joined at least one other peer:
receiver.joined().await?;
// then, you can broadcast messages to all other peers!
sender.broadcast(b"hello world this is a gossip message".to_vec().into()).await?;
// and read messages from others!
while let Some(event) = receiver.next().await {
match event? {
Event::Received(message) => {
println!("received a message: {:?}", std::str::from_utf8(&message.content));
}
_ => {}
}
}
// clean shutdown makes sure that other peers are notified that you went offline
router.shutdown().await.std_context("shutdown router")?;
Ok(())
}
fn bootstrap_peers() -> Vec<EndpointId> {
// insert your bootstrap peers here, or get them from your environment
vec![]
}
```
# License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
<http://www.apache.org/licenses/LICENSE-2.0>)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
<http://opensource.org/licenses/MIT>)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this project by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.

View file

@ -0,0 +1,64 @@
[changelog]
# changelog header
header = """
# Changelog\n
All notable changes to iroh-gossip will be documented in this file.\n
"""
body = """
{% if version %}\
{% if previous.version %}\
## [{{ version | trim_start_matches(pat="v") }}](<REPO>/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
{% else %}\
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% endif %}\
{% else %}\
## [unreleased]
{% endif %}\
{% macro commit(commit) -%}
- {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}\
{{ commit.message | upper_first }} - ([{{ commit.id | truncate(length=7, end="") }}](<REPO>/commit/{{ commit.id }}))\
{% endmacro -%}
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group | striptags | trim | upper_first }}
{% for commit in commits
| filter(attribute="scope")
| sort(attribute="scope") %}
{{ self::commit(commit=commit) }}
{%- endfor -%}
{% raw %}\n{% endraw %}\
{%- for commit in commits %}
{%- if not commit.scope -%}
{{ self::commit(commit=commit) }}
{% endif -%}
{% endfor -%}
{% endfor %}\n
"""
footer = ""
postprocessors = [
{ pattern = '<REPO>', replace = "https://github.com/n0-computer/iroh-gossip" },
{ pattern = "\\(#([0-9]+)\\)", replace = "([#${1}](https://github.com/n0-computer/iroh-gossip/issues/${1}))"}
]
[git]
# regex for parsing and grouping commits
commit_parsers = [
{ message = "^feat", group = "<!-- 0 -->⛰️ Features" },
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
{ message = "^chore\\(release\\)", skip = true },
{ message = "^chore\\(deps\\)", skip = true },
{ message = "^chore\\(pr\\)", skip = true },
{ message = "^chore\\(pull\\)", skip = true },
{ message = "^chore|ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
]

View file

@ -0,0 +1,13 @@
# Code of Conduct
Online or off, Number Zero is a harassment-free environment for everyone, regardless of gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age or religion or technical skill level. We do not tolerate harassment of participants in any form.
Harassment includes verbal comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age, religion, sexual images in public spaces, deliberate intimidation, stalking, following, harassing photography or recording, sustained disruption of talks or other events, inappropriate physical contact, and unwelcome sexual attention. Participants asked to stop any harassing behavior are expected to comply immediately.
If a participant engages in harassing behaviour, the organizers may take any action they deem appropriate, including warning the offender or expulsion from events and online forums.
If you are being harassed, notice that someone else is being harassed, or have any other concerns, please contact a member of the organizing team immediately.
At offline events, organizers will identify themselves, and will help participants contact venue security or local law enforcement, provide escorts, or otherwise assist those experiencing harassment to feel safe for the duration of the event. We value your participation!
This document is based on a similar code from [EDGI](https://envirodatagov.org/) and [Civic Tech Toronto](http://civictech.ca/about-us/), itself derived from the [Recurse Center's Social Rules](https://www.recurse.com/manual#sec-environment), and the [anti-harassment policy from the Geek Feminism Wiki](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy).

View file

@ -0,0 +1,41 @@
[advisories]
ignore = [
"RUSTSEC-2024-0436",
"RUSTSEC-2023-0089",
]
[bans]
deny = [
"aws-lc",
"aws-lc-rs",
"aws-lc-sys",
"native-tls",
"openssl",
]
multiple-versions = "allow"
[licenses]
allow = [
"Apache-2.0",
"Apache-2.0 WITH LLVM-exception",
"BSD-2-Clause",
"BSD-3-Clause",
"BSL-1.0",
"CDLA-Permissive-2.0",
"ISC",
"MIT",
"Zlib",
"Unicode-3.0",
"MPL-2.0",
"Unlicense",
]
[[licenses.clarify]]
expression = "MIT AND ISC AND OpenSSL"
name = "ring"
[[licenses.clarify.license-files]]
hash = 3171872035
path = "LICENSE"
[sources]

View file

@ -0,0 +1,319 @@
use std::{
collections::HashMap,
fmt,
net::{Ipv4Addr, SocketAddrV4},
str::FromStr,
};
use bytes::Bytes;
use clap::Parser;
use futures_lite::StreamExt;
use iroh::{
address_lookup::memory::MemoryLookup, Endpoint, EndpointAddr, PublicKey, RelayMode, RelayUrl,
SecretKey,
};
use iroh_gossip::{
api::{Event, GossipReceiver},
net::{Gossip, GOSSIP_ALPN},
proto::TopicId,
};
use n0_error::{bail_any, AnyError, Result, StdResultExt};
use n0_future::task;
use serde::{Deserialize, Serialize};
use serde_byte_array::ByteArray;
/// Chat over iroh-gossip
///
/// This broadcasts signed messages over iroh-gossip and verifies signatures
/// on received messages.
///
/// By default a new endpoint id is created when starting the example. To reuse your identity,
/// set the `--secret-key` flag with the secret key printed on a previous invocation.
///
/// By default, the relay server run by n0 is used. To use a local relay server, run
///     cargo run --bin iroh-relay --features iroh-relay -- --dev
/// in another terminal and then set the `-d http://localhost:3340` flag on this example.
// NOTE: the `///` doc comments on the struct and its fields double as the
// `--help` output via clap's derive, so they are user-facing text.
#[derive(Parser, Debug)]
struct Args {
    /// secret key to derive our endpoint id from.
    #[clap(long)]
    secret_key: Option<String>,
    /// Set a custom relay server. By default, the relay server hosted by n0 will be used.
    #[clap(short, long)]
    relay: Option<RelayUrl>,
    /// Disable relay completely.
    #[clap(long)]
    no_relay: bool,
    /// Set your nickname.
    #[clap(short, long)]
    name: Option<String>,
    /// Set the bind port for our socket. By default, a random port will be used.
    #[clap(short, long, default_value = "0")]
    bind_port: u16,
    #[clap(subcommand)]
    command: Command,
}
/// CLI subcommands: either open a new room (optionally with a fixed topic)
/// or join an existing one from a ticket.
// NOTE: the `///` doc comments become clap's `--help` text.
#[derive(Parser, Debug)]
enum Command {
    /// Open a chat room for a topic and print a ticket for others to join.
    ///
    /// If no topic is provided, a new topic will be created.
    Open {
        /// Optionally set the topic id (32 bytes, as a 64-character hex string).
        // Fixed: the help text previously claimed "64 bytes"; `TopicId` is a
        // 32-byte identifier (see `TopicId::from_bytes([23u8; 32])` below),
        // which is 64 characters when hex-encoded.
        topic: Option<TopicId>,
    },
    /// Join a chat room from a ticket.
    Join {
        /// The ticket, as base32 string.
        ticket: String,
    },
}
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    let args = Args::parse();
    // parse the cli command
    let (topic, peers) = match &args.command {
        Command::Open { topic } => {
            // no topic given: create a fresh random topic id
            let topic = topic.unwrap_or_else(|| TopicId::from_bytes(rand::random()));
            println!("> opening chat room for topic {topic}");
            (topic, vec![])
        }
        Command::Join { ticket } => {
            // the ticket carries both the topic and the bootstrap peer addresses
            let Ticket { topic, peers } = Ticket::from_str(ticket)?;
            println!("> joining chat room for topic {topic}");
            (topic, peers)
        }
    };
    // parse or generate our secret key
    let secret_key = match args.secret_key {
        None => SecretKey::generate(&mut rand::rng()),
        Some(key) => key.parse()?,
    };
    // printed so the user can pass it back via --secret-key to keep their identity
    println!(
        "> our secret key: {}",
        data_encoding::HEXLOWER.encode(&secret_key.to_bytes())
    );
    // configure our relay map; --no-relay and --relay are mutually exclusive
    let relay_mode = match (args.no_relay, args.relay) {
        (false, None) => RelayMode::Default,
        (false, Some(url)) => RelayMode::Custom(url.into()),
        (true, None) => RelayMode::Disabled,
        (true, Some(_)) => bail_any!("You cannot set --no-relay and --relay at the same time"),
    };
    println!("> using relay servers: {}", fmt_relay_mode(&relay_mode));
    // create a memory lookup to pass in endpoint addresses to
    let memory_lookup = MemoryLookup::new();
    // build our magic endpoint
    let endpoint = Endpoint::builder()
        .secret_key(secret_key)
        .address_lookup(memory_lookup.clone())
        .relay_mode(relay_mode.clone())
        .bind_addr(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, args.bind_port))?
        .bind()
        .await?;
    println!("> our endpoint id: {}", endpoint.id());
    // create the gossip protocol
    let gossip = Gossip::builder().spawn(endpoint.clone());
    // print a ticket that includes our own endpoint id and endpoint addresses
    if !matches!(relay_mode, RelayMode::Disabled) {
        // if we are expecting a relay, wait until we get a home relay
        // before moving on
        endpoint.online().await;
    }
    let ticket = {
        let me = endpoint.addr();
        // the ticket includes the peers we know about plus ourselves
        let peers = peers.iter().cloned().chain([me]).collect();
        Ticket { topic, peers }
    };
    println!("> ticket to join us: {ticket}");
    // setup router: incoming connections with the gossip ALPN are handed to `gossip`
    let router = iroh::protocol::Router::builder(endpoint.clone())
        .accept(GOSSIP_ALPN, gossip.clone())
        .spawn();
    // join the gossip topic by connecting to known peers, if any
    let peer_ids = peers.iter().map(|p| p.id).collect();
    if peers.is_empty() {
        println!("> waiting for peers to join us...");
    } else {
        println!("> trying to connect to {} peers...", peers.len());
        // add the peer addrs from the ticket to our endpoint's addressbook so that they can be dialed
        for peer in peers.into_iter() {
            memory_lookup.add_endpoint_info(peer);
        }
    };
    let (sender, receiver) = gossip.subscribe_and_join(topic, peer_ids).await?.split();
    println!("> connected!");
    // broadcast our name, if set
    if let Some(name) = args.name {
        let message = Message::AboutMe { name };
        let encoded_message = SignedMessage::sign_and_encode(endpoint.secret_key(), &message)?;
        sender.broadcast(encoded_message).await?;
    }
    // subscribe and print loop
    task::spawn(subscribe_loop(receiver));
    // spawn an input thread that reads stdin
    // not using tokio here because they recommend this for "technical reasons"
    let (line_tx, mut line_rx) = tokio::sync::mpsc::channel(1);
    std::thread::spawn(move || input_loop(line_tx));
    // broadcast each line we type
    println!("> type a message and hit enter to broadcast...");
    while let Some(text) = line_rx.recv().await {
        let message = Message::Message { text: text.clone() };
        let encoded_message = SignedMessage::sign_and_encode(endpoint.secret_key(), &message)?;
        sender.broadcast(encoded_message).await?;
        println!("> sent: {text}");
    }
    // shutdown
    // clean shutdown makes sure that other peers are notified that we went offline
    router.shutdown().await.anyerr()?;
    Ok(())
}
/// Prints incoming chat events until the gossip stream ends.
///
/// Keeps a local map from sender key to announced nickname so chat lines
/// can be labelled with a human-readable name when one is known.
async fn subscribe_loop(mut receiver: GossipReceiver) -> Result<()> {
    // sender -> nickname, filled in whenever a peer announces itself
    let mut names: HashMap<_, String> = HashMap::new();
    while let Some(event) = receiver.try_next().await? {
        // we only care about received gossip messages here
        let Event::Received(msg) = event else {
            continue;
        };
        let (from, message) = SignedMessage::verify_and_decode(&msg.content)?;
        match message {
            Message::AboutMe { name } => {
                println!("> {} is now known as {}", from.fmt_short(), name);
                names.insert(from, name);
            }
            Message::Message { text } => {
                let label = match names.get(&from) {
                    Some(name) => name.clone(),
                    None => from.fmt_short().to_string(),
                };
                println!("{label}: {text}");
            }
        }
    }
    Ok(())
}
/// Blocking loop that forwards each stdin line into an async channel.
///
/// Runs on a dedicated OS thread because stdin reads block; `blocking_send`
/// bridges back into the tokio runtime.
fn input_loop(line_tx: tokio::sync::mpsc::Sender<String>) -> Result<()> {
    let stdin = std::io::stdin();
    let mut line = String::new();
    loop {
        // reuse one buffer across iterations; read_line appends, so clear first
        line.clear();
        stdin.read_line(&mut line).anyerr()?;
        line_tx.blocking_send(line.clone()).anyerr()?;
    }
}
// Length in bytes of an iroh signature.
const SIGNATURE_LENGTH: usize = iroh::Signature::LENGTH;
// Fixed-size byte array so the signature serializes compactly with serde.
type Signature = ByteArray<SIGNATURE_LENGTH>;
/// Envelope for a chat message: the sender's public key, the encoded
/// payload, and the sender's signature over that payload.
#[derive(Debug, Serialize, Deserialize)]
struct SignedMessage {
    // sender's public key; also used to verify `signature`
    from: PublicKey,
    // postcard-encoded `Message`; this is exactly what the signature covers
    data: Bytes,
    signature: Signature,
}
impl SignedMessage {
pub fn verify_and_decode(bytes: &[u8]) -> Result<(PublicKey, Message)> {
let signed_message: Self =
postcard::from_bytes(bytes).std_context("decode signed message")?;
let key: PublicKey = signed_message.from;
key.verify(
&signed_message.data,
&iroh::Signature::from_bytes(&signed_message.signature),
)
.std_context("verify signature")?;
let message: Message =
postcard::from_bytes(&signed_message.data).std_context("decode message")?;
Ok((signed_message.from, message))
}
pub fn sign_and_encode(secret_key: &SecretKey, message: &Message) -> Result<Bytes> {
let data: Bytes = postcard::to_stdvec(&message)
.std_context("encode message")?
.into();
let signature = secret_key.sign(&data);
let from: PublicKey = secret_key.public();
let signed_message = Self {
from,
data,
signature: ByteArray::new(signature.to_bytes()),
};
let encoded = postcard::to_stdvec(&signed_message).std_context("encode signed message")?;
Ok(encoded.into())
}
}
/// The chat protocol messages exchanged over gossip (signed and
/// postcard-encoded via [`SignedMessage`]).
#[derive(Debug, Serialize, Deserialize)]
enum Message {
    /// Announce a nickname for the sending peer.
    AboutMe { name: String },
    /// A chat line to display.
    Message { text: String },
}
/// Everything a peer needs to join a chat room: the topic id plus the
/// addresses of peers already in the swarm.
#[derive(Debug, Serialize, Deserialize)]
struct Ticket {
    // the gossip topic to subscribe to
    topic: TopicId,
    // bootstrap peers (including the ticket creator itself)
    peers: Vec<EndpointAddr>,
}
impl Ticket {
    /// Decodes a ticket from its postcard byte representation.
    fn from_bytes(bytes: &[u8]) -> Result<Self> {
        let ticket = postcard::from_bytes(bytes).std_context("decode ticket")?;
        Ok(ticket)
    }
    /// Encodes this ticket into its postcard byte representation.
    pub fn to_bytes(&self) -> Vec<u8> {
        // postcard serialization of this struct cannot fail
        postcard::to_stdvec(self).expect("postcard::to_stdvec is infallible")
    }
}
/// Serializes to base32.
impl fmt::Display for Ticket {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // lowercase base32 (no padding) for a copy-paste friendly ticket string
        let encoded = data_encoding::BASE32_NOPAD
            .encode(&self.to_bytes())
            .to_ascii_lowercase();
        f.write_str(&encoded)
    }
}
/// Deserializes from base32.
impl FromStr for Ticket {
    type Err = AnyError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // `Display` emits lowercase, but BASE32_NOPAD decodes uppercase
        let upper = s.to_ascii_uppercase();
        let bytes = data_encoding::BASE32_NOPAD
            .decode(upper.as_bytes())
            .std_context("decode ticket base32")?;
        Self::from_bytes(&bytes)
    }
}
// helpers

/// Renders a [`RelayMode`] as a human-readable string for log output.
fn fmt_relay_mode(relay_mode: &RelayMode) -> String {
    match relay_mode {
        RelayMode::Disabled => String::from("None"),
        RelayMode::Default => String::from("Default Relay (production) servers"),
        RelayMode::Staging => String::from("Default Relay (staging) servers"),
        RelayMode::Custom(map) => {
            // list every configured relay url, comma-separated
            let urls: Vec<String> = map
                .urls::<Vec<_>>()
                .into_iter()
                .map(|url| url.to_string())
                .collect();
            urls.join(", ")
        }
    }
}

View file

@ -0,0 +1,21 @@
use iroh::{protocol::Router, Endpoint};
use iroh_gossip::{net::Gossip, ALPN};
use n0_error::{Result, StdResultExt};
/// Minimal example: wire the gossip protocol into an iroh router and shut down.
#[tokio::main]
async fn main() -> Result<()> {
    // create an iroh endpoint that includes the standard address lookup mechanisms
    // we've built at number0
    let endpoint = Endpoint::bind().await?;
    // build gossip protocol
    let gossip = Gossip::builder().spawn(endpoint.clone());
    // setup router: incoming connections with the gossip ALPN are handed to `gossip`
    let router = Router::builder(endpoint.clone())
        .accept(ALPN, gossip.clone())
        .spawn();
    // do fun stuff with the gossip protocol
    // clean shutdown notifies other peers before the process exits
    router.shutdown().await.std_context("shutdown router")?;
    Ok(())
}

View file

@ -0,0 +1 @@
pre-release-hook = ["git", "cliff", "--prepend", "CHANGELOG.md", "--tag", "{{version}}", "--unreleased" ]

View file

@ -0,0 +1,35 @@
seeds = [0, 1, 212312388123, 123]
config.latency.dynamic = { min = "10ms", max = "50ms" }
[[scenario]]
sim = "GossipSingle"
nodes = 20
[[scenario]]
sim = "GossipSingle"
nodes = 100
[[scenario]]
sim = "GossipSingle"
nodes = 1000
[[scenario]]
sim = "GossipMulti"
nodes = 20
[[scenario]]
sim = "GossipMulti"
nodes = 100
[[scenario]]
sim = "GossipMulti"
nodes = 1000
[[scenario]]
sim = "GossipAll"
nodes = 20
[[scenario]]
sim = "GossipAll"
nodes = 100
rounds = 5

View file

@ -0,0 +1,535 @@
//! Public API for using iroh-gossip
//!
//! The API is usable both locally and over RPC.
use std::{
collections::{BTreeSet, HashSet},
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use iroh_base::EndpointId;
use irpc::{channel::mpsc, rpc_requests, Client};
use n0_error::{e, stack_error};
use n0_future::{Stream, StreamExt, TryStreamExt};
use serde::{Deserialize, Serialize};
use crate::proto::{DeliveryScope, TopicId};
/// Default channel capacity for topic subscription channels (one per topic).
const TOPIC_EVENTS_DEFAULT_CAP: usize = 2048;
/// Channel capacity for topic command send channels.
const TOPIC_COMMANDS_CAP: usize = 64;
/// Input messages for the gossip actor.
///
/// The `rpc_requests` macro (from irpc) turns this into the request type used
/// both locally and, with the `rpc` feature, over the wire.
#[rpc_requests(message = RpcMessage, rpc_feature = "rpc")]
#[derive(Debug, Serialize, Deserialize)]
pub(crate) enum Request {
    /// Join a topic: events flow back on `tx`, commands are sent in on `rx`.
    #[rpc(tx=mpsc::Sender<Event>, rx=mpsc::Receiver<Command>)]
    Join(JoinRequest),
}
/// Payload for [`Request::Join`]: the topic to join and the initial set of
/// bootstrap peers to connect to.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct JoinRequest {
    pub topic_id: TopicId,
    pub bootstrap: BTreeSet<EndpointId>,
}
/// Errors returned by the gossip API.
#[allow(missing_docs)]
#[stack_error(derive, add_meta, from_sources)]
#[non_exhaustive]
pub enum ApiError {
    /// An error on the underlying (local or remote) RPC transport.
    #[error(transparent)]
    Rpc { source: irpc::Error },
    /// The gossip topic was closed.
    #[error("topic closed")]
    Closed,
}
// The irpc channel error types all funnel into `ApiError::Rpc` by first
// converting into the generic `irpc::Error`.
impl From<irpc::channel::SendError> for ApiError {
    fn from(value: irpc::channel::SendError) -> Self {
        irpc::Error::from(value).into()
    }
}
impl From<irpc::channel::mpsc::RecvError> for ApiError {
    fn from(value: irpc::channel::mpsc::RecvError) -> Self {
        irpc::Error::from(value).into()
    }
}
impl From<irpc::channel::oneshot::RecvError> for ApiError {
    fn from(value: irpc::channel::oneshot::RecvError) -> Self {
        irpc::Error::from(value).into()
    }
}
/// API to control a [`Gossip`] instance.
///
/// This has methods to subscribe and join gossip topics, which return handles to publish
/// and receive messages on topics.
///
/// [`Gossip`] derefs to [`GossipApi`], so all functions on [`GossipApi`] are directly callable
/// from [`Gossip`].
///
/// Additionally, a [`GossipApi`] can be created by connecting to an RPC server. See [`Gossip::listen`]
/// and [`GossipApi::connect`] (*requires the `rpc` feature).
///
/// [`Gossip`]: crate::net::Gossip
/// [`Gossip::listen`]: crate::net::Gossip::listen
#[derive(Debug, Clone)]
pub struct GossipApi {
    // Either an in-process (local) or RPC-backed irpc client for `Request`s.
    client: Client<Request>,
}
impl GossipApi {
    /// Creates an API handle that talks to an in-process gossip actor via `tx`.
    #[cfg(feature = "net")]
    pub(crate) fn local(tx: tokio::sync::mpsc::Sender<RpcMessage>) -> Self {
        let local = irpc::LocalSender::<Request>::from(tx);
        Self {
            client: local.into(),
        }
    }
    /// Connect to a remote as a RPC client.
    #[cfg(feature = "rpc")]
    pub fn connect(endpoint: quinn::Endpoint, addr: std::net::SocketAddr) -> Self {
        let inner = irpc::Client::quinn(endpoint, addr);
        Self { client: inner }
    }
    /// Listen on a quinn endpoint for incoming RPC connections.
    #[cfg(all(feature = "rpc", feature = "net"))]
    pub(crate) async fn listen(&self, endpoint: quinn::Endpoint) {
        use irpc::rpc::{listen, RemoteService};
        // Only a local (in-process) client can serve requests; a client that is
        // itself remote has no actor to forward to, hence the expect.
        let local = self
            .client
            .as_local()
            .expect("cannot listen on remote client");
        let handler = Request::remote_handler(local);
        listen::<Request>(endpoint, handler).await
    }
    /// Join a gossip topic with options.
    ///
    /// Returns a [`GossipTopic`] instantly. To wait for at least one connection to be established,
    /// you can await [`GossipTopic::joined`].
    ///
    /// Messages will be queued until a first connection is available. If the internal channel becomes full,
    /// the oldest messages will be dropped from the channel.
    pub async fn subscribe_with_opts(
        &self,
        topic_id: TopicId,
        opts: JoinOptions,
    ) -> Result<GossipTopic, ApiError> {
        let req = JoinRequest {
            topic_id,
            bootstrap: opts.bootstrap,
        };
        // Commands flow towards the actor, events flow back; the two directions
        // have independent channel capacities.
        let (tx, rx) = self
            .client
            .bidi_streaming(req, TOPIC_COMMANDS_CAP, opts.subscription_capacity)
            .await?;
        Ok(GossipTopic::new(tx, rx))
    }
    /// Join a gossip topic with the default options and wait for at least one active connection.
    pub async fn subscribe_and_join(
        &self,
        topic_id: TopicId,
        bootstrap: Vec<EndpointId>,
    ) -> Result<GossipTopic, ApiError> {
        let mut sub = self
            .subscribe_with_opts(topic_id, JoinOptions::with_bootstrap(bootstrap))
            .await?;
        sub.joined().await?;
        Ok(sub)
    }
    /// Join a gossip topic with the default options.
    ///
    /// Note that this will not wait for any bootstrap endpoint to be available.
    /// To ensure the topic is connected to at least one endpoint, use [`GossipTopic::joined`]
    /// or [`Self::subscribe_and_join`]
    pub async fn subscribe(
        &self,
        topic_id: TopicId,
        bootstrap: Vec<EndpointId>,
    ) -> Result<GossipTopic, ApiError> {
        let sub = self
            .subscribe_with_opts(topic_id, JoinOptions::with_bootstrap(bootstrap))
            .await?;
        Ok(sub)
    }
}
/// Sender for a gossip topic.
#[derive(Debug, Clone)]
pub struct GossipSender(mpsc::Sender<Command>);

impl GossipSender {
    /// Wraps the raw command channel of a topic subscription.
    pub(crate) fn new(sender: mpsc::Sender<Command>) -> Self {
        GossipSender(sender)
    }
    /// Broadcasts a message to all endpoints.
    pub async fn broadcast(&self, message: Bytes) -> Result<(), ApiError> {
        Ok(self.send(Command::Broadcast(message)).await?)
    }
    /// Broadcasts a message to our direct neighbors.
    pub async fn broadcast_neighbors(&self, message: Bytes) -> Result<(), ApiError> {
        Ok(self.send(Command::BroadcastNeighbors(message)).await?)
    }
    /// Joins a set of peers.
    pub async fn join_peers(&self, peers: Vec<EndpointId>) -> Result<(), ApiError> {
        Ok(self.send(Command::JoinPeers(peers)).await?)
    }
    /// Forwards `command` to the topic actor over the command channel.
    async fn send(&self, command: Command) -> Result<(), irpc::channel::SendError> {
        self.0.send(command).await?;
        Ok(())
    }
}
/// Subscribed gossip topic.
///
/// This handle is a [`Stream`] of [`Event`]s from the topic, and can be used to send messages.
///
/// Once the [`GossipTopic`] is dropped, the network actor will leave the gossip topic.
///
/// It may be split into sender and receiver parts with [`Self::split`]. In this case, the topic will
/// be left once both the [`GossipSender`] and [`GossipReceiver`] halves are dropped.
#[derive(Debug)]
pub struct GossipTopic {
    sender: GossipSender,
    receiver: GossipReceiver,
}
impl GossipTopic {
    /// Wraps the raw command/event channels of a topic subscription.
    pub(crate) fn new(sender: mpsc::Sender<Command>, receiver: mpsc::Receiver<Event>) -> Self {
        let sender = GossipSender::new(sender);
        Self {
            sender,
            receiver: GossipReceiver::new(receiver),
        }
    }
    /// Splits `self` into [`GossipSender`] and [`GossipReceiver`] parts.
    pub fn split(self) -> (GossipSender, GossipReceiver) {
        (self.sender, self.receiver)
    }
    /// Sends a message to all peers.
    pub async fn broadcast(&mut self, message: Bytes) -> Result<(), ApiError> {
        self.sender.broadcast(message).await
    }
    /// Sends a message to our direct neighbors in the swarm.
    pub async fn broadcast_neighbors(&mut self, message: Bytes) -> Result<(), ApiError> {
        self.sender.broadcast_neighbors(message).await
    }
    /// Lists our current direct neighbors.
    pub fn neighbors(&self) -> impl Iterator<Item = EndpointId> + '_ {
        self.receiver.neighbors()
    }
    /// Waits until we are connected to at least one endpoint.
    ///
    /// See [`GossipReceiver::joined`] for details.
    pub async fn joined(&mut self) -> Result<(), ApiError> {
        self.receiver.joined().await
    }
    /// Returns `true` if we are connected to at least one endpoint.
    pub fn is_joined(&self) -> bool {
        self.receiver.is_joined()
    }
}
impl Stream for GossipTopic {
    type Item = Result<Event, ApiError>;
    // Delegates to the receiver half; see `GossipReceiver::poll_next`.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        Pin::new(&mut self.receiver).poll_next(cx)
    }
}
/// Receiver for gossip events on a topic.
///
/// This is a [`Stream`] of [`Event`]s emitted from the topic.
#[derive(derive_more::Debug)]
pub struct GossipReceiver {
    #[debug("BoxStream")]
    stream: Pin<Box<dyn Stream<Item = Result<Event, ApiError>> + Send + Sync + 'static>>,
    // Current set of direct neighbors, maintained from NeighborUp/NeighborDown
    // events as they pass through `poll_next`.
    neighbors: HashSet<EndpointId>,
}
impl GossipReceiver {
    /// Wraps the raw event channel of a topic subscription.
    pub(crate) fn new(events_rx: mpsc::Receiver<Event>) -> Self {
        let stream = events_rx.into_stream().map_err(ApiError::from);
        let stream = Box::pin(stream);
        Self {
            stream,
            neighbors: Default::default(),
        }
    }
    /// Lists our current direct neighbors.
    pub fn neighbors(&self) -> impl Iterator<Item = EndpointId> + '_ {
        self.neighbors.iter().copied()
    }
    /// Waits until we are connected to at least one endpoint.
    ///
    /// Progresses the event stream to the first [`Event::NeighborUp`] event.
    ///
    /// Note that this consumes this initial `NeighborUp` event. If you want to track
    /// neighbors, use [`Self::neighbors`] after awaiting [`Self::joined`], and then
    /// continue to track `NeighborUp` events on the event stream.
    pub async fn joined(&mut self) -> Result<(), ApiError> {
        // `poll_next` updates `self.neighbors`, so draining events until the set
        // is non-empty waits exactly for the first NeighborUp.
        while !self.is_joined() {
            let _event = self.next().await.ok_or(e!(ApiError::Closed))??;
        }
        Ok(())
    }
    /// Returns `true` if we are connected to at least one endpoint.
    pub fn is_joined(&self) -> bool {
        !self.neighbors.is_empty()
    }
}
impl Stream for GossipReceiver {
    type Item = Result<Event, ApiError>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let item = std::task::ready!(Pin::new(&mut self.stream).poll_next(cx));
        // Keep the neighbor set in sync with the events we hand out.
        if let Some(Ok(item)) = &item {
            match item {
                Event::NeighborUp(endpoint_id) => {
                    self.neighbors.insert(*endpoint_id);
                }
                Event::NeighborDown(endpoint_id) => {
                    self.neighbors.remove(endpoint_id);
                }
                _ => {}
            }
        }
        Poll::Ready(item)
    }
}
/// Events emitted from a gossip topic.
///
/// These are the events emitted from a [`GossipReceiver`].
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Serialize, Deserialize)]
pub enum Event {
    /// We have a new, direct neighbor in the swarm membership layer for this topic.
    NeighborUp(EndpointId),
    /// We dropped direct neighbor in the swarm membership layer for this topic.
    NeighborDown(EndpointId),
    /// We received a gossip message for this topic.
    Received(Message),
    /// We missed some messages because our [`GossipReceiver`] was not progressing fast enough.
    Lagged,
}
// Converts protocol-level events into the public API event type.
impl From<crate::proto::Event<EndpointId>> for Event {
    fn from(event: crate::proto::Event<EndpointId>) -> Self {
        match event {
            crate::proto::Event::NeighborUp(endpoint_id) => Self::NeighborUp(endpoint_id),
            crate::proto::Event::NeighborDown(endpoint_id) => Self::NeighborDown(endpoint_id),
            crate::proto::Event::Received(message) => Self::Received(Message {
                content: message.content,
                scope: message.scope,
                delivered_from: message.delivered_from,
            }),
        }
    }
}
/// A gossip message
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, derive_more::Debug, Serialize, Deserialize)]
pub struct Message {
    /// The content of the message
    #[debug("Bytes({})", self.content.len())]
    pub content: Bytes,
    /// The scope of the message.
    /// This tells us if the message is from a direct neighbor or actual gossip.
    pub scope: DeliveryScope,
    /// The endpoint that delivered the message. This is not the same as the original author.
    pub delivered_from: EndpointId,
}
/// Command for a gossip topic.
#[derive(Serialize, Deserialize, derive_more::Debug, Clone)]
pub enum Command {
    /// Broadcasts a message to all endpoints in the swarm.
    Broadcast(#[debug("Bytes({})", _0.len())] Bytes),
    /// Broadcasts a message to all direct neighbors.
    BroadcastNeighbors(#[debug("Bytes({})", _0.len())] Bytes),
    /// Connects to a set of peers.
    JoinPeers(Vec<EndpointId>),
}
/// Options for joining a gossip topic.
#[derive(Serialize, Deserialize, Debug)]
pub struct JoinOptions {
    /// The initial bootstrap endpoints.
    pub bootstrap: BTreeSet<EndpointId>,
    /// The maximum number of messages that can be buffered in a subscription.
    ///
    /// If this limit is reached, the subscriber will receive a `Lagged` response,
    /// the message will be dropped, and the subscriber will be closed.
    ///
    /// This is to prevent a single slow subscriber from blocking the dispatch loop.
    /// If a subscriber is lagging, it should be closed and re-opened.
    pub subscription_capacity: usize,
}
impl JoinOptions {
/// Creates [`JoinOptions`] with the provided bootstrap endpoints and the default subscription
/// capacity.
pub fn with_bootstrap(endpoints: impl IntoIterator<Item = EndpointId>) -> Self {
Self {
bootstrap: endpoints.into_iter().collect(),
subscription_capacity: TOPIC_EVENTS_DEFAULT_CAP,
}
}
}
#[cfg(test)]
mod tests {
    use crate::api::GossipTopic;
    /// End-to-end test: a gossip endpoint is exposed over RPC, a second in-process
    /// endpoint broadcasts a message, and an RPC client joins the topic and receives it.
    #[cfg(all(feature = "rpc", feature = "net"))]
    #[tokio::test]
    #[n0_tracing_test::traced_test]
    async fn test_rpc() -> n0_error::Result<()> {
        use iroh::{address_lookup::memory::MemoryLookup, protocol::Router, RelayMap};
        use n0_error::{AnyError, Result, StackResultExt, StdResultExt};
        use n0_future::{time::Duration, StreamExt};
        use rand_chacha::rand_core::SeedableRng;
        use crate::{
            api::{Event, GossipApi},
            net::{test::create_endpoint, Gossip},
            proto::TopicId,
            ALPN,
        };
        let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1);
        let (relay_map, _relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
        // Helper: an endpoint plus a gossip instance wired into a protocol router.
        async fn create_gossip_endpoint(
            rng: &mut rand_chacha::ChaCha12Rng,
            relay_map: RelayMap,
        ) -> Result<(Router, Gossip)> {
            let endpoint = create_endpoint(rng, relay_map, None).await?;
            let gossip = Gossip::builder().spawn(endpoint.clone());
            let router = Router::builder(endpoint)
                .accept(ALPN, gossip.clone())
                .spawn();
            Ok((router, gossip))
        }
        let topic_id = TopicId::from_bytes([0u8; 32]);
        // create our gossip endpoint
        let (router, gossip) = create_gossip_endpoint(&mut rng, relay_map.clone()).await?;
        // create a second endpoint so that we can test actually joining
        let (endpoint2_id, endpoint2_addr, endpoint2_task) = {
            let (router, gossip) = create_gossip_endpoint(&mut rng, relay_map.clone()).await?;
            let endpoint_addr = router.endpoint().addr();
            let endpoint_id = router.endpoint().id();
            // The second endpoint broadcasts once it has joined the topic.
            let task = tokio::task::spawn(async move {
                let mut topic = gossip.subscribe_and_join(topic_id, vec![]).await?;
                topic.broadcast(b"hello".to_vec().into()).await?;
                Ok::<_, AnyError>(router)
            });
            (endpoint_id, endpoint_addr, task)
        };
        // create a memory lookup service to add endpoint addr manually
        let memory_lookup = MemoryLookup::new();
        memory_lookup.add_endpoint_info(endpoint2_addr);
        router.endpoint().address_lookup().add(memory_lookup);
        // expose the gossip endpoint over RPC
        let (rpc_server_endpoint, rpc_server_cert) =
            irpc::util::make_server_endpoint("127.0.0.1:0".parse().unwrap())
                .context("make server endpoint")?;
        let rpc_server_addr = rpc_server_endpoint
            .local_addr()
            .std_context("resolve server addr")?;
        let rpc_server_task = tokio::task::spawn(async move {
            gossip.listen(rpc_server_endpoint).await;
        });
        // connect to the RPC endpoint with a new client
        let rpc_client_endpoint =
            irpc::util::make_client_endpoint("127.0.0.1:0".parse().unwrap(), &[&rpc_server_cert])
                .context("make client endpoint")?;
        let rpc_client = GossipApi::connect(rpc_client_endpoint, rpc_server_addr);
        // join via RPC
        let recv = async move {
            let mut topic = rpc_client
                .subscribe_and_join(topic_id, vec![endpoint2_id])
                .await?;
            // wait for a message
            while let Some(event) = topic.try_next().await? {
                match event {
                    Event::Received(message) => {
                        assert_eq!(&message.content[..], b"hello");
                        break;
                    }
                    Event::Lagged => panic!("unexpected lagged event"),
                    _ => {}
                }
            }
            Ok::<_, AnyError>(())
        };
        // timeout to not hang in case of failure
        tokio::time::timeout(Duration::from_secs(10), recv)
            .await
            .std_context("rpc recv timeout")??;
        // shutdown
        rpc_server_task.abort();
        router.shutdown().await.std_context("shutdown router")?;
        let router2 = endpoint2_task.await.std_context("join endpoint task")??;
        router2
            .shutdown()
            .await
            .std_context("shutdown second router")?;
        Ok(())
    }
    /// Compile-time assertion that [`GossipTopic`] implements `Sync`.
    #[test]
    fn ensure_gossip_topic_is_sync() {
        #[allow(unused)]
        fn get() -> GossipTopic {
            unimplemented!()
        }
        #[allow(unused)]
        fn check(_t: impl Sync) {}
        #[allow(unused)]
        fn foo() {
            check(get());
        }
    }
}

View file

@ -0,0 +1,418 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
};
use clap::Parser;
use comfy_table::{presets::NOTHING, Cell, CellAlignment, Table};
use iroh_gossip::proto::sim::{
BootstrapMode, NetworkConfig, RoundStats, RoundStatsAvg, RoundStatsDiff, Simulator,
SimulatorConfig,
};
use n0_error::{Result, StackResultExt, StdResultExt};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use serde::{Deserialize, Serialize};
use tracing::{error_span, info, warn};
/// The traffic pattern a simulation scenario exercises.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[allow(clippy::enum_variant_names)]
enum Simulation {
    /// A single sender broadcasts a single message per round.
    GossipSingle,
    /// Each round a different sender is chosen at random, and broadcasts a single message
    GossipMulti,
    /// Each round, all peers broadcast a single message simultaneously.
    GossipAll,
}
/// One scenario entry from the simulation config file.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ScenarioDescription {
    /// Which traffic pattern to run.
    sim: Simulation,
    /// Number of simulated nodes.
    nodes: u32,
    /// How the swarm is bootstrapped before measuring.
    #[serde(default)]
    bootstrap: BootstrapMode,
    /// Number of gossip rounds to run.
    #[serde(default = "defaults::rounds")]
    rounds: u32,
    /// Per-scenario network config; `main` fills this from the file-level config when absent.
    config: Option<NetworkConfig>,
}
impl ScenarioDescription {
    /// Returns a short identifier used for file names and filtering, e.g. `GossipSingle-n20-r30`.
    pub fn label(&self) -> String {
        // Exhaustive destructuring: adding a field to the struct forces a
        // decision here on whether it belongs in the label.
        let &ScenarioDescription {
            sim,
            nodes,
            rounds,
            config: _,
            bootstrap: _,
        } = &self;
        format!("{sim:?}-n{nodes}-r{rounds}")
    }
}
/// Default values for optional scenario fields.
mod defaults {
    /// Default number of gossip rounds per scenario.
    pub fn rounds() -> u32 {
        30
    }
}
/// Top-level simulation config file (parsed from TOML in `main`).
#[derive(Debug, Serialize, Deserialize)]
struct SimConfig {
    // RNG seeds; every scenario is run once per seed.
    seeds: Vec<u64>,
    // Default network config applied to scenarios that do not set their own.
    config: Option<NetworkConfig>,
    // The scenarios to run.
    scenario: Vec<ScenarioDescription>,
}
// Command line entry point; plain `//` comments here on purpose: doc comments
// on clap-derive items would change the generated --help output.
#[derive(Debug, Parser)]
struct Cli {
    #[clap(subcommand)]
    command: Command,
}
#[derive(Debug, Parser)]
enum Command {
    /// Run simulations
    Run {
        // Path to the TOML scenario config.
        #[clap(short, long)]
        config_path: PathBuf,
        // Directory to write per-scenario configs and result files into.
        #[clap(short, long)]
        out_dir: Option<PathBuf>,
        // Baseline directory to compare results against after the run.
        #[clap(short, long)]
        baseline: Option<PathBuf>,
        // Run scenarios sequentially instead of in parallel.
        #[clap(short, long)]
        single_threaded: bool,
        // Only run scenarios whose label is contained in this list.
        #[clap(short, long)]
        filter: Vec<String>,
    },
    /// Compare simulation runs
    Compare {
        baseline: PathBuf,
        current: PathBuf,
        #[clap(short, long)]
        filter: Vec<String>,
    },
}
/// CLI entry point: either runs the configured scenarios or compares two result dirs.
fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    let args: Cli = Cli::parse();
    match args.command {
        Command::Run {
            config_path,
            out_dir,
            baseline,
            single_threaded,
            filter,
        } => {
            let config_text = std::fs::read_to_string(&config_path)
                .with_std_context(|_| format!("read config {}", config_path.display()))?;
            let config: SimConfig = toml::from_str(&config_text).std_context("parse config")?;
            let base_config = config.config.unwrap_or_default();
            info!("base config: {base_config:?}");
            let seeds = config.seeds;
            let mut scenarios = config.scenario;
            // Scenarios without their own network config inherit the file-level one.
            for scenario in scenarios.iter_mut() {
                scenario.config.get_or_insert_with(|| base_config.clone());
            }
            if let Some(out_dir) = out_dir.as_ref() {
                std::fs::create_dir_all(out_dir)
                    .with_std_context(|_| format!("create output dir {}", out_dir.display()))?;
            }
            // An empty filter list means "run everything".
            let filter_fn = |s: &ScenarioDescription| {
                let label = s.label();
                if filter.is_empty() {
                    true
                } else {
                    filter.iter().any(|x| x == &label)
                }
            };
            // Scenarios are independent, so by default they run in parallel via rayon.
            let results: Result<Vec<_>> = if !single_threaded {
                scenarios
                    .into_par_iter()
                    .filter(filter_fn)
                    .map(|scenario| run_and_save_simulation(scenario, &seeds, out_dir.as_ref()))
                    .collect()
            } else {
                scenarios
                    .into_iter()
                    .filter(filter_fn)
                    .map(|scenario| run_and_save_simulation(scenario, &seeds, out_dir.as_ref()))
                    .collect()
            };
            let mut results = results?;
            // Sort for deterministic output order independent of scheduling.
            results.sort_by_key(|a| a.scenario.label());
            for result in results {
                print_result(&result);
            }
            // Optionally compare the fresh results against a baseline directory.
            if let (Some(baseline), Some(out_dir)) = (baseline, out_dir) {
                compare_dirs(baseline, out_dir, filter)?;
            }
        }
        Command::Compare {
            baseline,
            current,
            filter,
        } => {
            compare_dirs(baseline, current, filter)?;
        }
    }
    Ok(())
}
/// Runs one scenario for all `seeds` and, if `out_dir` is given, persists both the
/// scenario config (`<label>.config.toml`) and its results (`<label>.results.json`).
fn run_and_save_simulation(
    scenario: ScenarioDescription,
    seeds: &[u64],
    out_dir: Option<impl AsRef<Path>>,
) -> Result<SimulationResults> {
    let label = scenario.label();
    // Write the effective scenario config before running, so it is on disk even
    // if the simulation itself takes long.
    if let Some(out_dir) = out_dir.as_ref() {
        let path = out_dir.as_ref().join(format!("{label}.config.toml"));
        let encoded = toml::to_string(&scenario).std_context("encode scenario")?;
        std::fs::write(&path, encoded)
            .with_std_context(|_| format!("write scenario {}", &path.display()))?;
    }
    let result = run_simulation(seeds, scenario);
    if let Some(out_dir) = out_dir.as_ref() {
        let path = out_dir.as_ref().join(format!("{label}.results.json"));
        let encoded = serde_json::to_string(&result).std_context("encode results")?;
        std::fs::write(&path, encoded)
            .with_std_context(|_| format!("write results {}", path.display()))?;
    }
    Ok(result)
}
/// Results of running one scenario across several seeds.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct SimulationResults {
    scenario: ScenarioDescription,
    /// Maps seeds to results
    results: HashMap<u64, RoundStatsAvg>,
    /// Average over all per-seed results; `None` when no seed produced results.
    average: Option<RoundStatsAvg>,
}
impl SimulationResults {
    /// Reads and deserializes simulation results from a JSON file.
    fn load_from_file(path: impl AsRef<Path>) -> Result<Self> {
        let path = path.as_ref();
        let raw = std::fs::read_to_string(path)
            .with_std_context(|_| format!("read results {}", path.display()))?;
        serde_json::from_str(&raw).std_context("decode results")
    }
}
/// Runs `scenario` once per seed and averages the per-seed round statistics.
fn run_simulation(seeds: &[u64], scenario: ScenarioDescription) -> SimulationResults {
    let mut results = HashMap::new();
    let network_config = scenario.config.clone().unwrap_or_default();
    for &seed in seeds {
        let span = error_span!("sim", name=%scenario.label(), %seed);
        let _guard = span.enter();
        let sim_config = SimulatorConfig {
            rng_seed: seed,
            peers: scenario.nodes as usize,
            ..Default::default()
        };
        let bootstrap = scenario.bootstrap.clone();
        let mut simulator = Simulator::new(sim_config, network_config.clone());
        info!("start");
        // Bring the swarm up before measuring; a partially-connected swarm is
        // only warned about, the simulation still proceeds.
        let outcome = simulator.bootstrap(bootstrap);
        if outcome.has_peers_with_no_neighbors() {
            warn!("not all nodes active after bootstrap: {outcome:?}");
        } else {
            info!("bootstrapped, all nodes active");
        }
        let result = match scenario.sim {
            Simulation::GossipSingle => BigSingle.run(simulator, scenario.rounds as usize),
            Simulation::GossipMulti => BigMulti.run(simulator, scenario.rounds as usize),
            Simulation::GossipAll => BigAll.run(simulator, scenario.rounds as usize),
        };
        info!("done");
        results.insert(seed, result);
    }
    let stats: Vec<_> = results.values().cloned().collect();
    let average = if !stats.is_empty() {
        let avg = RoundStatsAvg::avg(&stats);
        Some(avg)
    } else {
        None
    };
    SimulationResults {
        average,
        results,
        scenario,
    }
}
/// Prints a per-scenario summary table (mean/max/min of RMR, LDH, missed, duration).
fn print_result(r: &SimulationResults) {
    let seeds = r.results.len();
    println!("{} with {seeds} seeds", r.scenario.label());
    let Some(avg) = r.average.as_ref() else {
        println!("no results, simulation did not complete");
        return;
    };
    let mut table = Table::new();
    let header = ["", "RMR", "LDH", "missed", "duration"]
        .into_iter()
        .map(|s| Cell::new(s).set_alignment(CellAlignment::Right));
    table
        .load_preset(NOTHING)
        .set_header(header)
        .add_row(fmt_round("mean", &avg.mean))
        .add_row(fmt_round("max", &avg.max))
        .add_row(fmt_round("min", &avg.min));
    println!("{table}");
    // `max.missed` is the worst seed; any value above zero means message loss.
    if avg.max.missed > 0.0 {
        println!("WARN: Messages were missed!")
    }
    println!();
}
/// A runnable simulation scenario: drives `sim` for `rounds` rounds and returns averages.
trait Scenario {
    fn run(self, sim: Simulator, rounds: usize) -> RoundStatsAvg;
}
/// Scenario: one fixed sender broadcasts a single message per round.
struct BigSingle;
impl Scenario for BigSingle {
    fn run(self, mut simulator: Simulator, rounds: usize) -> RoundStatsAvg {
        // Pick the sender once; the same peer broadcasts in every round.
        let sender = simulator.random_peer();
        for round in 0..rounds {
            let payload = format!("m{round}").into_bytes().into();
            simulator.gossip_round(vec![(sender, payload)]);
        }
        simulator.round_stats_average()
    }
}
/// Scenario: every round a fresh random sender broadcasts a single message.
struct BigMulti;
impl Scenario for BigMulti {
    fn run(self, mut simulator: Simulator, rounds: usize) -> RoundStatsAvg {
        for round in 0..rounds {
            // Re-pick the sender each round.
            let sender = simulator.random_peer();
            let payload = format!("m{round}").into_bytes().into();
            simulator.gossip_round(vec![(sender, payload)]);
        }
        simulator.round_stats_average()
    }
}
/// Scenario: every peer broadcasts simultaneously in every round.
struct BigAll;
impl Scenario for BigAll {
    fn run(self, mut simulator: Simulator, rounds: usize) -> RoundStatsAvg {
        // Each peer sends this many messages per round (kept at 1).
        const MESSAGES_PER_PEER: usize = 1;
        for round in 0..rounds {
            let mut batch = Vec::new();
            for id in simulator.network.peer_ids() {
                for msg_idx in 0..MESSAGES_PER_PEER {
                    let payload: bytes::Bytes =
                        format!("{round}:{msg_idx}.{id}").into_bytes().into();
                    batch.push((id, payload));
                }
            }
            simulator.gossip_round(batch);
        }
        simulator.round_stats_average()
    }
}
/// Compares every `*.results.json` file in `current_path` against the file of the
/// same name in `baseline_dir`, optionally restricted to the labels in `filter`.
///
/// Files without a baseline counterpart are reported and skipped; comparison
/// failures of individual pairs are reported but do not abort the run.
fn compare_dirs(baseline_dir: PathBuf, current_path: PathBuf, filter: Vec<String>) -> Result<()> {
    let mut paths = vec![];
    for entry in std::fs::read_dir(&current_path)
        .with_std_context(|_| format!("read directory {}", current_path.display()))?
        .filter_map(Result::ok)
        .filter(|x| x.path().is_file())
    {
        let current_file = entry.path().to_owned();
        let Some(filename) = current_file.file_name().and_then(|s| s.to_str()) else {
            continue;
        };
        let Some(basename) = filename.strip_suffix(".results.json") else {
            continue;
        };
        if !filter.is_empty() && !filter.iter().any(|x| x == basename) {
            continue;
        }
        let baseline_file = baseline_dir.join(filename);
        if !baseline_file.exists() {
            // Bugfix: previously this only printed a note but still queued the
            // pair, so the comparison below always failed on the missing file.
            println!("skip {basename} (not in baseline)");
            continue;
        }
        paths.push((basename.to_string(), baseline_file, current_file));
    }
    // Sort by basename for stable output regardless of directory iteration order.
    paths.sort();
    for (basename, baseline_file, current_file) in paths {
        println!("comparing {basename}");
        if let Err(err) = compare_files(&baseline_file, &current_file) {
            // Bugfix: the message was missing its closing parenthesis.
            println!("  skip (reason: {err:#})");
        }
    }
    Ok(())
}
/// Loads two result files and prints their comparison.
fn compare_files(baseline: impl AsRef<Path>, current: impl AsRef<Path>) -> Result<()> {
    let baseline =
        SimulationResults::load_from_file(baseline.as_ref()).context("failed to load baseline")?;
    let current =
        SimulationResults::load_from_file(current.as_ref()).context("failed to load current")?;
    compare_results(baseline, current);
    Ok(())
}
/// Prints the relative difference (as percentages) between a baseline run and a
/// current run, or a note if either run has no averages to compare.
fn compare_results(baseline: SimulationResults, current: SimulationResults) {
    match (baseline.average, current.average) {
        (None, Some(_avg)) => {
            println!("baseline run did not finish");
        }
        (Some(_avg), None) => {
            println!("current run did not finish");
        }
        (None, None) => println!("both runs did not finish"),
        (Some(baseline), Some(current)) => {
            let diff = baseline.diff(&current);
            let mut table = Table::new();
            let header = ["", "RMR", "LDH", "missed", "duration"]
                .into_iter()
                .map(|s| Cell::new(s).set_alignment(CellAlignment::Right));
            table
                .load_preset(NOTHING)
                .set_header(header)
                .add_row(fmt_diff_round("mean", &diff.mean))
                .add_row(fmt_diff_round("max", &diff.max))
                .add_row(fmt_diff_round("min", &diff.min));
            println!("{table}");
        }
    }
}
/// Formats one `RoundStats` record as a row of right-aligned table cells.
fn fmt_round(label: &str, round: &RoundStats) -> Vec<Cell> {
    let mut cells = Vec::with_capacity(5);
    for text in [
        label.to_string(),
        format!("{:.2}", round.rmr),
        format!("{:.2}", round.ldh),
        format!("{:.2}", round.missed),
        format!("{}ms", round.duration.as_millis()),
    ] {
        cells.push(Cell::new(text).set_alignment(CellAlignment::Right));
    }
    cells
}
fn fmt_diff_round(label: &str, round: &RoundStatsDiff) -> Vec<String> {
vec![
label.to_string(),
fmt_percent(round.rmr),
fmt_percent(round.ldh),
fmt_percent(round.missed),
fmt_percent(round.duration),
]
}
/// Renders a fractional diff as a signed, right-aligned percentage, e.g. `    +50.00%`.
fn fmt_percent(diff: f32) -> String {
    let percent = diff * 100.;
    format!("{percent:>+10.2}%")
}

View file

@ -0,0 +1,25 @@
#![cfg_attr(feature = "net", doc = include_str!("../README.md"))]
//! Broadcast messages to peers subscribed to a topic
//!
//! The crate is designed to be used from the [iroh] crate, which provides a
//! [high level interface](https://docs.rs/iroh/latest/iroh/client/gossip/index.html),
//! but can also be used standalone.
//!
//! [iroh]: https://docs.rs/iroh
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![cfg_attr(iroh_docsrs, feature(doc_cfg))]
#[cfg(feature = "net")]
pub use net::Gossip;
#[cfg(feature = "net")]
#[doc(inline)]
pub use net::GOSSIP_ALPN as ALPN;
#[cfg(any(feature = "net", feature = "rpc"))]
pub mod api;
pub mod metrics;
#[cfg(feature = "net")]
pub mod net;
pub mod proto;
pub use proto::TopicId;

View file

@ -0,0 +1,45 @@
//! Metrics for iroh-gossip
use iroh_metrics::{Counter, MetricsGroup};
/// Metrics collected by the gossip module (a group of counters, not an enum).
#[derive(Debug, Default, MetricsGroup)]
#[metrics(name = "gossip")]
pub struct Metrics {
    /// Number of control messages sent
    pub msgs_ctrl_sent: Counter,
    /// Number of control messages received
    pub msgs_ctrl_recv: Counter,
    /// Number of data messages sent
    pub msgs_data_sent: Counter,
    /// Number of data messages received
    pub msgs_data_recv: Counter,
    /// Total size of all data messages sent
    pub msgs_data_sent_size: Counter,
    /// Total size of all data messages received
    pub msgs_data_recv_size: Counter,
    /// Total size of all control messages sent
    pub msgs_ctrl_sent_size: Counter,
    /// Total size of all control messages received
    pub msgs_ctrl_recv_size: Counter,
    /// Number of times we connected to a peer
    pub neighbor_up: Counter,
    /// Number of times we disconnected from a peer
    pub neighbor_down: Counter,
    /// Number of times the main actor loop ticked
    pub actor_tick_main: Counter,
    /// Number of times the actor ticked for a message received
    pub actor_tick_rx: Counter,
    /// Number of times the actor ticked for an endpoint event
    pub actor_tick_endpoint: Counter,
    /// Number of times the actor ticked for a dialer event
    pub actor_tick_dialer: Counter,
    /// Number of times the actor ticked for a successful dialer event
    pub actor_tick_dialer_success: Counter,
    /// Number of times the actor ticked for a failed dialer event
    pub actor_tick_dialer_failure: Counter,
    /// Number of times the actor ticked for an incoming event
    pub actor_tick_in_event_rx: Counter,
    /// Number of times the actor ticked for a timer event
    pub actor_tick_timers: Counter,
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,175 @@
//! An address lookup service to gather addressing info collected from gossip Join and ForwardJoin messages.
use std::{
collections::{btree_map::Entry, BTreeMap},
sync::{Arc, RwLock},
time::Duration,
};
use iroh::address_lookup::{self, AddressLookup, EndpointData, EndpointInfo};
use iroh_base::EndpointId;
use n0_future::{
boxed::BoxStream,
stream::{self, StreamExt},
task::AbortOnDropHandle,
time::SystemTime,
};
/// Retention and eviction tuning for the gossip address lookup.
pub(crate) struct RetentionOpts {
    /// How long to keep received endpoint info records alive before pruning them
    retention: Duration,
    /// How often to check for expired entries
    evict_interval: Duration,
}

impl Default for RetentionOpts {
    fn default() -> Self {
        // Keep records for five minutes, sweeping for stale entries twice a minute.
        RetentionOpts {
            evict_interval: Duration::from_secs(30),
            retention: Duration::from_secs(60 * 5),
        }
    }
}
/// An address lookup service that expires endpoints after some time.
///
/// It is added to the endpoint when constructing a gossip instance, and the gossip actor
/// then adds endpoint addresses as received with Join or ForwardJoin messages.
#[derive(Debug, Clone)]
pub(crate) struct GossipAddressLookup {
    // Shared map of endpoint records, pruned by the background eviction task.
    endpoints: NodeMap,
    // Keeps the eviction task alive; the task is aborted when the last clone drops.
    _task_handle: Arc<AbortOnDropHandle<()>>,
}
/// Shared, lock-protected map from endpoint id to its last-known addressing info.
type NodeMap = Arc<RwLock<BTreeMap<EndpointId, StoredEndpointInfo>>>;
/// Addressing data plus the time it was last updated (used for expiry).
#[derive(Debug)]
struct StoredEndpointInfo {
    data: EndpointData,
    last_updated: SystemTime,
}
impl Default for GossipAddressLookup {
    // Equivalent to `Self::new()`, i.e. default retention options.
    fn default() -> Self {
        Self::new()
    }
}
impl GossipAddressLookup {
    /// Provenance string attached to resolved items, marking gossip as their source.
    const PROVENANCE: &'static str = "gossip";
    /// Creates a new gossip address lookup instance.
    pub(crate) fn new() -> Self {
        Self::with_opts(Default::default())
    }
    /// Creates the lookup with explicit retention options and spawns the eviction task.
    pub(crate) fn with_opts(opts: RetentionOpts) -> Self {
        let endpoints: NodeMap = Default::default();
        let task = {
            // Hold only a weak reference so the background task does not keep the
            // map alive; it exits once all lookup handles have been dropped.
            let endpoints = Arc::downgrade(&endpoints);
            n0_future::task::spawn(async move {
                let mut interval = n0_future::time::interval(opts.evict_interval);
                loop {
                    interval.tick().await;
                    let Some(endpoints) = endpoints.upgrade() else {
                        break;
                    };
                    let now = SystemTime::now();
                    // Drop entries older than the retention window. If the clock
                    // moved backwards, `duration_since` fails and the entry's age
                    // becomes Duration::MAX, i.e. it is pruned.
                    endpoints.write().expect("poisoned").retain(|_k, v| {
                        let age = now.duration_since(v.last_updated).unwrap_or(Duration::MAX);
                        age <= opts.retention
                    });
                }
            })
        };
        Self {
            endpoints,
            _task_handle: Arc::new(AbortOnDropHandle::new(task)),
        }
    }
    /// Augments endpoint addressing information for the given endpoint ID.
    ///
    /// The provided addressing information is combined with the existing info in the in-memory
    /// lookup. Any new direct addresses are added to those already present while the
    /// relay URL is overwritten.
    pub(crate) fn add(&self, endpoint_info: impl Into<EndpointInfo>) {
        let last_updated = SystemTime::now();
        let EndpointInfo { endpoint_id, data } = endpoint_info.into();
        let mut guard = self.endpoints.write().expect("poisoned");
        match guard.entry(endpoint_id) {
            Entry::Occupied(mut entry) => {
                // Merge into the existing record and refresh its expiry clock.
                let existing = entry.get_mut();
                existing.data.add_addrs(data.addrs().cloned());
                existing.data.set_user_data(data.user_data().cloned());
                existing.last_updated = last_updated;
            }
            Entry::Vacant(entry) => {
                entry.insert(StoredEndpointInfo { data, last_updated });
            }
        }
    }
}
impl AddressLookup for GossipAddressLookup {
    /// Resolves an endpoint from the in-memory map as a one-item stream, or
    /// returns `None` when no record is stored for it.
    fn resolve(
        &self,
        endpoint_id: EndpointId,
    ) -> Option<BoxStream<Result<address_lookup::Item, address_lookup::Error>>> {
        let guard = self.endpoints.read().expect("poisoned");
        let info = guard.get(&endpoint_id)?;
        // The item timestamp is microseconds since the unix epoch.
        let last_updated = info
            .last_updated
            .duration_since(SystemTime::UNIX_EPOCH)
            .expect("time drift")
            .as_micros() as u64;
        let item = address_lookup::Item::new(
            EndpointInfo::from_parts(endpoint_id, info.data.clone()),
            Self::PROVENANCE,
            Some(last_updated),
        );
        Some(stream::iter(Some(Ok(item))).boxed())
    }
}
#[cfg(test)]
mod tests {
    use std::time::Duration;
    use iroh::{address_lookup::AddressLookup, EndpointAddr, SecretKey};
    use n0_future::StreamExt;
    use rand::SeedableRng;
    use super::{GossipAddressLookup, RetentionOpts};
    /// Entries resolve while younger than `retention` and disappear afterwards.
    #[tokio::test]
    async fn test_retention() {
        let opts = RetentionOpts {
            evict_interval: Duration::from_millis(100),
            retention: Duration::from_millis(500),
        };
        let disco = GossipAddressLookup::with_opts(opts);
        let rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(1);
        let k1 = SecretKey::generate(rng);
        let a1 = EndpointAddr::new(k1.public());
        disco.add(a1);
        // A fresh entry resolves immediately ...
        assert!(matches!(
            disco.resolve(k1.public()).unwrap().next().await,
            Some(Ok(_))
        ));
        tokio::time::sleep(Duration::from_millis(200)).await;
        // ... and still within the retention window ...
        assert!(matches!(
            disco.resolve(k1.public()).unwrap().next().await,
            Some(Ok(_))
        ));
        tokio::time::sleep(Duration::from_millis(700)).await;
        // ... but is gone once older than the retention window.
        assert!(disco.resolve(k1.public()).is_none());
    }
}

View file

@ -0,0 +1,435 @@
//! Utilities for iroh-gossip networking
use std::{
collections::{hash_map, HashMap},
io,
time::Duration,
};
use bytes::{Bytes, BytesMut};
use iroh::{
endpoint::{Connection, RecvStream, SendStream},
EndpointId,
};
use n0_error::{e, stack_error};
use n0_future::{
time::{sleep_until, Instant},
FuturesUnordered, StreamExt,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
sync::mpsc,
task::JoinSet,
};
use tracing::{debug, trace, Instrument};
use super::{InEvent, ProtoMessage};
use crate::proto::{util::TimerMap, TopicId};
/// Errors related to message writing
///
/// Produced by the send path ([`SendLoop`] and [`write_frame`]).
#[allow(missing_docs)]
#[stack_error(derive, add_meta, from_sources)]
#[non_exhaustive]
pub(crate) enum WriteError {
    /// Connection error
    #[error("Connection error")]
    Connection {
        #[error(std_err)]
        source: iroh::endpoint::ConnectionError,
    },
    /// Serialization failed
    #[error("Serialization failed")]
    Ser {
        #[error(std_err)]
        source: postcard::Error,
    },
    /// IO error
    #[error("IO error")]
    Io {
        #[error(std_err)]
        source: std::io::Error,
    },
    /// Message was larger than the configured maximum message size
    #[error("message too large")]
    TooLarge {},
}
/// Header sent as the first frame on every uni-directional gossip stream.
///
/// It announces which topic all subsequent frames on the stream belong to.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct StreamHeader {
    /// The topic all messages on this stream are scoped to.
    pub(crate) topic_id: TopicId,
}
impl StreamHeader {
    /// Reads the header frame from the start of `stream`.
    ///
    /// Fails with an `UnexpectedEof` IO error if the stream ends before a
    /// complete header frame was received.
    pub(crate) async fn read(
        stream: &mut RecvStream,
        buffer: &mut BytesMut,
        max_message_size: usize,
    ) -> Result<Self, ReadError> {
        let header: Self = read_frame(stream, buffer, max_message_size)
            .await?
            .ok_or_else(|| {
                ReadError::from(io::Error::new(
                    io::ErrorKind::UnexpectedEof,
                    "stream ended before header",
                ))
            })?;
        Ok(header)
    }
    /// Writes the header as a length-prefixed frame to `stream`.
    pub(crate) async fn write(
        self,
        stream: &mut SendStream,
        buffer: &mut Vec<u8>,
        max_message_size: usize,
    ) -> Result<(), WriteError> {
        write_frame(stream, &self, buffer, max_message_size).await?;
        Ok(())
    }
}
/// Receive side of a gossip connection.
///
/// Accepts incoming uni-directional streams, reads each stream's topic
/// header, and forwards every decoded message to the actor via `in_event_tx`.
pub(crate) struct RecvLoop {
    /// The endpoint on the remote side of `conn`.
    remote_endpoint_id: EndpointId,
    conn: Connection,
    /// Upper bound for a single decoded frame, in bytes.
    max_message_size: usize,
    /// Channel into the gossip actor.
    in_event_tx: mpsc::Sender<InEvent>,
}
impl RecvLoop {
    /// Creates a new receive loop over `conn`.
    pub(crate) fn new(
        remote_endpoint_id: EndpointId,
        conn: Connection,
        in_event_tx: mpsc::Sender<InEvent>,
        max_message_size: usize,
    ) -> Self {
        Self {
            remote_endpoint_id,
            conn,
            max_message_size,
            in_event_tx,
        }
    }
    /// Runs the loop until the connection closes and all accepted streams are drained.
    pub(crate) async fn run(&mut self) -> Result<(), ReadError> {
        // One in-flight `next()` future per accepted stream.
        let mut read_futures = FuturesUnordered::new();
        let mut conn_is_closed = false;
        let closed = self.conn.closed();
        tokio::pin!(closed);
        // Keep going while the connection is alive or streams still have data to drain.
        while !conn_is_closed || !read_futures.is_empty() {
            tokio::select! {
                _ = &mut closed, if !conn_is_closed => {
                    conn_is_closed = true;
                }
                stream = self.conn.accept_uni(), if !conn_is_closed => {
                    let stream = match stream {
                        Ok(stream) => stream,
                        Err(_) => {
                            // An accept failure means the connection is gone.
                            conn_is_closed = true;
                            continue;
                        }
                    };
                    let state = RecvStreamState::new(stream, self.max_message_size).await?;
                    debug!(topic=%state.header.topic_id.fmt_short(), "stream opened");
                    read_futures.push(state.next());
                }
                Some(res) = read_futures.next(), if !read_futures.is_empty() => {
                    let (state, msg) = match res {
                        Ok((state, msg)) => (state, msg),
                        Err(err) => {
                            // A single failed stream does not tear down the whole loop.
                            debug!("recv stream closed with error: {err:#}");
                            continue;
                        }
                    };
                    match msg {
                        None => debug!(topic=%state.header.topic_id.fmt_short(), "stream closed"),
                        Some(msg) => {
                            if self.in_event_tx.send(InEvent::RecvMessage(self.remote_endpoint_id, msg)).await.is_err() {
                                debug!("stop recv loop: actor closed");
                                break;
                            }
                            // Re-arm the stream for its next frame.
                            read_futures.push(state.next());
                        }
                    }
                }
            }
        }
        debug!("recv loop closed");
        Ok(())
    }
}
/// Per-stream receive state: the stream, its topic header, and a scratch buffer.
#[derive(Debug)]
struct RecvStreamState {
    stream: RecvStream,
    /// Header read when the stream was opened; fixes the stream's topic.
    header: StreamHeader,
    /// Scratch buffer for frame reads.
    buffer: BytesMut,
    max_message_size: usize,
}
impl RecvStreamState {
    /// Accepts a new stream: reads the [`StreamHeader`] that every stream
    /// starts with and stores it alongside the stream.
    async fn new(mut stream: RecvStream, max_message_size: usize) -> Result<Self, ReadError> {
        let mut buffer = BytesMut::new();
        let header = StreamHeader::read(&mut stream, &mut buffer, max_message_size).await?;
        Ok(Self {
            // Keep the header buffer for subsequent frame reads instead of
            // discarding it and allocating a second `BytesMut`.
            buffer,
            max_message_size,
            stream,
            header,
        })
    }
    /// Reads the next message from the stream.
    ///
    /// Returns `self` and the next message, or `None` if the stream ended gracefully.
    ///
    /// ## Cancellation safety
    ///
    /// This function is not cancellation-safe.
    async fn next(mut self) -> Result<(Self, Option<ProtoMessage>), ReadError> {
        let msg = read_frame(&mut self.stream, &mut self.buffer, self.max_message_size).await?;
        // Tag the raw protocol message with this stream's topic.
        let msg = msg.map(|msg| ProtoMessage {
            topic: self.header.topic_id,
            message: msg,
        });
        Ok((self, msg))
    }
}
/// Send side of a gossip connection.
///
/// Receives [`ProtoMessage`]s over `send_rx` and writes each to a per-topic
/// uni-directional stream, opening streams lazily on first use.
pub(crate) struct SendLoop {
    conn: Connection,
    /// One open send stream per topic.
    streams: HashMap<TopicId, SendStream>,
    /// Scratch buffer for postcard encoding, reused across messages.
    buffer: Vec<u8>,
    /// Upper bound for a single encoded frame, in bytes.
    max_message_size: usize,
    /// Tasks waiting for the remote to acknowledge finished streams.
    finishing: JoinSet<()>,
    /// Incoming messages to be written out.
    send_rx: mpsc::Receiver<ProtoMessage>,
}
impl SendLoop {
    /// Creates a new send loop over `conn`.
    pub(crate) fn new(
        conn: Connection,
        send_rx: mpsc::Receiver<ProtoMessage>,
        max_message_size: usize,
    ) -> Self {
        Self {
            conn,
            max_message_size,
            buffer: Default::default(),
            streams: Default::default(),
            finishing: Default::default(),
            send_rx,
        }
    }
    /// Runs the loop until the connection closes or `send_rx` is exhausted.
    ///
    /// `queue` contains messages to send before entering the receive loop.
    pub(crate) async fn run(&mut self, queue: Vec<ProtoMessage>) -> Result<(), WriteError> {
        for msg in queue {
            self.write_message(&msg).await?;
        }
        let conn_clone = self.conn.clone();
        let closed = conn_clone.closed();
        tokio::pin!(closed);
        loop {
            tokio::select! {
                biased;
                _ = &mut closed => break,
                Some(msg) = self.send_rx.recv() => self.write_message(&msg).await?,
                // Reap completed stream-finish tasks as we go.
                _ = self.finishing.join_next(), if !self.finishing.is_empty() => {}
                else => break,
            }
        }
        // Close remaining streams.
        for (topic_id, mut stream) in self.streams.drain() {
            stream.finish().ok();
            self.finishing.spawn(
                async move {
                    stream.stopped().await.ok();
                    debug!(topic=%topic_id.fmt_short(), "stream closed");
                }
                .instrument(tracing::Span::current()),
            );
        }
        if !self.finishing.is_empty() {
            trace!(
                "send loop closing, waiting for {} send streams to finish",
                self.finishing.len()
            );
            // Wait for the remote to acknowledge all streams are finished.
            if let Err(_elapsed) = n0_future::time::timeout(Duration::from_secs(5), async move {
                while self.finishing.join_next().await.is_some() {}
            })
            .await
            {
                debug!("not all send streams finished within timeout, abort")
            }
        }
        debug!("send loop closed");
        Ok(())
    }
    /// Write a [`ProtoMessage`] as a length-prefixed, postcard-encoded message on its stream.
    ///
    /// If no stream is opened yet, this opens a new stream for the topic and writes the topic header.
    ///
    /// This function is not cancellation-safe.
    pub async fn write_message(&mut self, message: &ProtoMessage) -> Result<(), WriteError> {
        let ProtoMessage { topic, message } = message;
        let topic_id = *topic;
        // A disconnect is the final message for a topic; its stream is closed after writing.
        let is_last = message.is_disconnect();
        let mut entry = match self.streams.entry(topic_id) {
            hash_map::Entry::Occupied(entry) => entry,
            hash_map::Entry::Vacant(entry) => {
                let mut stream = self.conn.open_uni().await?;
                let header = StreamHeader { topic_id };
                header
                    .write(&mut stream, &mut self.buffer, self.max_message_size)
                    .await?;
                debug!(topic=%topic_id.fmt_short(), "stream opened");
                entry.insert_entry(stream)
            }
        };
        let stream = entry.get_mut();
        write_frame(stream, message, &mut self.buffer, self.max_message_size).await?;
        if is_last {
            trace!(topic=%topic_id.fmt_short(), "stream closing");
            let mut stream = entry.remove();
            if stream.finish().is_ok() {
                self.finishing.spawn(
                    async move {
                        stream.stopped().await.ok();
                        debug!(topic=%topic_id.fmt_short(), "stream closed");
                    }
                    .instrument(tracing::Span::current()),
                );
            }
        }
        Ok(())
    }
}
/// Errors related to message reading
///
/// Produced by the receive path ([`RecvLoop`] and [`read_frame`]).
#[allow(missing_docs)]
#[stack_error(derive, add_meta, from_sources)]
#[non_exhaustive]
pub(crate) enum ReadError {
    /// Deserialization failed
    #[error("Deserialization failed")]
    De {
        #[error(std_err)]
        source: postcard::Error,
    },
    /// IO error
    #[error("IO error")]
    Io {
        #[error(std_err)]
        source: std::io::Error,
    },
    /// Message was larger than the configured maximum message size
    #[error("message too large")]
    TooLarge {},
}
/// Read a length-prefixed frame and decode with postcard.
///
/// Returns `Ok(None)` when the stream ended before a frame started.
pub async fn read_frame<T: DeserializeOwned>(
    reader: &mut RecvStream,
    buffer: &mut BytesMut,
    max_message_size: usize,
) -> Result<Option<T>, ReadError> {
    let Some(data) = read_lp(reader, buffer, max_message_size).await? else {
        return Ok(None);
    };
    Ok(Some(postcard::from_bytes(&data)?))
}
/// Reads a length prefixed buffer.
///
/// Returns the frame as raw bytes. If the end of the stream is reached before
/// the frame length starts, `None` is returned.
pub async fn read_lp(
    reader: &mut RecvStream,
    buffer: &mut BytesMut,
    max_message_size: usize,
) -> Result<Option<Bytes>, ReadError> {
    // A clean EOF before the 4-byte length prefix is a graceful stream end;
    // an EOF in the middle of a frame surfaces as an error below.
    let size = match reader.read_u32().await {
        Ok(size) => size,
        Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => return Ok(None),
        Err(err) => return Err(err.into()),
    };
    let size = usize::try_from(size).map_err(|_| e!(ReadError::TooLarge))?;
    if size > max_message_size {
        return Err(e!(ReadError::TooLarge));
    }
    buffer.resize(size, 0u8);
    reader
        .read_exact(&mut buffer[..])
        .await
        .map_err(io::Error::other)?;
    // Hand out exactly `size` bytes; the buffer keeps any remaining capacity for reuse.
    Ok(Some(buffer.split_to(size).freeze()))
}
/// Writes a length-prefixed frame.
///
/// Serializes `message` with postcard into `buffer` and writes it to `stream`
/// preceded by a big-endian `u32` length prefix.
///
/// # Errors
///
/// Returns [`WriteError::TooLarge`] if the encoded size reaches
/// `max_message_size` or does not fit into the `u32` prefix, and propagates
/// serialization and IO errors.
pub async fn write_frame<T: Serialize>(
    stream: &mut SendStream,
    message: &T,
    buffer: &mut Vec<u8>,
    max_message_size: usize,
) -> Result<(), WriteError> {
    let len = postcard::experimental::serialized_size(message)?;
    if len >= max_message_size {
        return Err(e!(WriteError::TooLarge));
    }
    // Reject frames whose length cannot be represented in the u32 prefix:
    // a plain `as` cast would silently truncate and corrupt the stream.
    let prefix = u32::try_from(len).map_err(|_| e!(WriteError::TooLarge))?;
    buffer.clear();
    buffer.resize(len, 0u8);
    let slice = postcard::to_slice(message, buffer)?;
    stream.write_u32(prefix).await?;
    stream.write_all(slice).await.map_err(io::Error::other)?;
    Ok(())
}
/// A [`TimerMap`] with an async method to wait for the next timer expiration.
#[derive(Debug)]
pub struct Timers<T> {
    map: TimerMap<T>,
}
impl<T> Default for Timers<T> {
    // Manual impl: `#[derive(Default)]` would add an unneeded `T: Default` bound.
    fn default() -> Self {
        Self {
            map: TimerMap::default(),
        }
    }
}
impl<T> Timers<T> {
    /// Creates a new timer map.
    pub fn new() -> Self {
        Self::default()
    }
    /// Inserts a new entry at the specified instant
    pub fn insert(&mut self, instant: Instant, item: T) {
        self.map.insert(instant, item);
    }
    /// Waits for the next timer to elapse.
    ///
    /// Pends forever when no timer is scheduled, which makes this safe to use
    /// as a branch in `select!`.
    pub async fn wait_next(&mut self) -> Instant {
        match self.map.first() {
            None => std::future::pending::<Instant>().await,
            Some(instant) => {
                sleep_until(*instant).await;
                *instant
            }
        }
    }
    /// Pops the earliest timer that expires at or before `now`.
    pub fn pop_before(&mut self, now: Instant) -> Option<(Instant, T)> {
        self.map.pop_before(now)
    }
}

View file

@ -0,0 +1,344 @@
//! Implementation of the iroh-gossip protocol, as an IO-less state machine
//!
//! This module implements the iroh-gossip protocol. The entry point is [`State`], which contains
//! the protocol state for a node.
//!
//! The iroh-gossip protocol is made up from two parts: A swarm membership protocol, based on
//! [HyParView][hyparview], and a gossip broadcasting protocol, based on [PlumTree][plumtree].
//!
//! For a full explanation it is recommended to read the two papers. What follows is a brief
//! outline of the protocols.
//!
//! All protocol messages are namespaced by a [`TopicId`], a 32 byte identifier. Topics are
//! separate swarms and broadcast scopes. The HyParView and PlumTree algorithms both work in the
//! scope of a single topic. Thus, joining multiple topics increases the number of open connections
//! to peers and the size of the local routing table.
//!
//! The **membership protocol** ([HyParView][hyparview]) is a cluster protocol where each peer
//! maintains a partial view of all nodes in the swarm.
//! A peer joins the swarm for a topic by connecting to any known peer that is a member of this
//! topic's swarm. Obtaining this initial contact info happens out of band. The peer then sends
//! a `Join` message to that initial peer. All peers maintain a list of
//! `active` and `passive` peers. Active peers are those that you maintain active connections to.
//! Passive peers are an address book of additional peers. If one of your active peers goes offline,
//! its slot is filled with a random peer from the passive set. In the default configuration, the
//! active view has a size of 5 and the passive view a size of 30.
//! The HyParView protocol ensures that active connections are always bidirectional, and regularly
//! exchanges nodes for the passive view in a `Shuffle` operation.
//! Thus, this protocol exposes a high degree of reliability and auto-recovery in the case of node
//! failures.
//!
//! The **gossip protocol** ([PlumTree][plumtree]) builds upon the membership protocol. It exposes
//! a method to broadcast messages to all peers in the swarm. On each node, it maintains two sets
//! of peers: An `eager` set and a `lazy` set. Both are subsets of the `active` view from the
//! membership protocol. When broadcasting a message from the local node, or upon receiving a
//! broadcast message, the message is pushed to all peers in the eager set. Additionally, the hash
//! of the message (which uniquely identifies it), but not the message content, is lazily pushed
//! to all peers in the `lazy` set. When receiving such lazy pushes (called `Ihaves`), those peers
//! may request the message content after a timeout if they didn't receive the message by one of
//! their eager peers before. When requesting a message from a currently-lazy peer, this peer is
//! also upgraded to be an eager peer from that moment on. This strategy self-optimizes the
//! messaging graph by latency. Note however that this optimization will work best if the messaging
//! paths are stable, i.e. if it's always the same peer that broadcasts. If not, the relative
//! message redundancy will grow and the ideal messaging graph might change frequently.
//!
//! [hyparview]: https://asc.di.fct.unl.pt/~jleitao/pdf/dsn07-leitao.pdf
//! [plumtree]: https://asc.di.fct.unl.pt/~jleitao/pdf/srds07-leitao.pdf
use std::{fmt, hash::Hash};
use bytes::Bytes;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
mod hyparview;
mod plumtree;
pub mod state;
pub mod topic;
pub mod util;
#[cfg(any(test, feature = "test-utils"))]
pub mod sim;
pub use hyparview::Config as HyparviewConfig;
pub use plumtree::{Config as PlumtreeConfig, DeliveryScope, Scope};
pub use state::{InEvent, Message, OutEvent, State, Timer, TopicId};
pub use topic::{Command, Config, Event, IO};
/// The default maximum size in bytes for a gossip message.
/// This is a sane but arbitrary default and can be changed in the [`Config`].
pub const DEFAULT_MAX_MESSAGE_SIZE: usize = 4096;
/// The minimum allowed value for [`Config::max_message_size`].
pub const MIN_MAX_MESSAGE_SIZE: usize = 512;
/// The identifier for a peer.
///
/// The protocol implementation is generic over this trait. When implementing the protocol,
/// a concrete type must be chosen that will then be used throughout the implementation to identify
/// and index individual peers.
///
/// Note that the concrete type will be used in protocol messages. Therefore, implementations of
/// the protocol are only compatible if the same concrete type is supplied for this trait.
///
/// TODO: Rename to `PeerId`? It does not necessarily refer to a peer's address, as long as the
/// networking layer can translate the value of its concrete type into an address.
pub trait PeerIdentity: Hash + Eq + Ord + Copy + fmt::Debug + Serialize + DeserializeOwned {}
// Blanket impl: any type satisfying the bounds is usable as a peer identity.
impl<T> PeerIdentity for T where
    T: Hash + Eq + Ord + Copy + fmt::Debug + Serialize + DeserializeOwned
{
}
/// Opaque binary data that is transmitted on messages that introduce new peers.
///
/// Implementations may use these bytes to supply addresses or other information needed to connect
/// to a peer that is not included in the peer's [`PeerIdentity`].
///
/// Cheap to clone: the payload is a reference-counted [`Bytes`].
#[derive(derive_more::Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)]
#[debug("PeerData({}b)", self.0.len())]
pub struct PeerData(Bytes);
impl PeerData {
    /// Create a new [`PeerData`] from a byte buffer.
    pub fn new(data: impl Into<Bytes>) -> Self {
        Self(data.into())
    }
    /// Get a reference to the contained [`bytes::Bytes`].
    pub fn inner(&self) -> &bytes::Bytes {
        &self.0
    }
    /// Get the peer data as a byte slice.
    pub fn as_bytes(&self) -> &[u8] {
        &self.0
    }
}
/// PeerInfo contains a peer's identifier and the opaque peer data as provided by the implementer.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
struct PeerInfo<PI> {
    /// The peer's identity.
    pub id: PI,
    /// Optional opaque data attached to the peer (e.g. addressing info).
    pub data: Option<PeerData>,
}
impl<PI> From<(PI, Option<PeerData>)> for PeerInfo<PI> {
    fn from((id, data): (PI, Option<PeerData>)) -> Self {
        Self { id, data }
    }
}
#[cfg(test)]
mod test {
    use std::{collections::HashSet, env, fmt, str::FromStr};
    use n0_tracing_test::traced_test;
    use rand::SeedableRng;
    use rand_chacha::ChaCha12Rng;
    use super::{Command, Config, Event};
    use crate::proto::{
        sim::{LatencyConfig, Network, NetworkConfig},
        Scope, TopicId,
    };
    /// Membership smoke test: joins establish symmetric neighbor relations,
    /// and a join against a full active view evicts a random neighbor.
    #[test]
    #[traced_test]
    fn hyparview_smoke() {
        // Create a network with 4 nodes and active_view_capacity 2
        let rng = ChaCha12Rng::seed_from_u64(read_var("SEED", 0));
        let mut config = Config::default();
        config.membership.active_view_capacity = 2;
        let network_config = NetworkConfig {
            proto: config,
            latency: LatencyConfig::default_static(),
        };
        let mut network = Network::new(network_config, rng);
        for i in 0..4 {
            network.insert(i);
        }
        let t: TopicId = [0u8; 32].into();
        // Do some joins between nodes 0,1,2
        network.command(0, t, Command::Join(vec![1, 2]));
        network.command(1, t, Command::Join(vec![2]));
        network.command(2, t, Command::Join(vec![]));
        network.run_trips(3);
        // Confirm emitted events
        let actual = network.events_sorted();
        let expected = sort(vec![
            (0, t, Event::NeighborUp(1)),
            (0, t, Event::NeighborUp(2)),
            (1, t, Event::NeighborUp(2)),
            (1, t, Event::NeighborUp(0)),
            (2, t, Event::NeighborUp(0)),
            (2, t, Event::NeighborUp(1)),
        ]);
        assert_eq!(actual, expected);
        // Confirm active connections
        assert_eq!(network.conns(), vec![(0, 1), (0, 2), (1, 2)]);
        // Now let node 3 join node 0.
        // Node 0 is full, so it will disconnect from either node 1 or node 2.
        network.command(3, t, Command::Join(vec![0]));
        network.run_trips(2);
        // Confirm emitted events. There's two options because whether node 0 disconnects from
        // node 1 or node 2 is random.
        let actual = network.events_sorted();
        eprintln!("actual {actual:#?}");
        let expected1 = sort(vec![
            (3, t, Event::NeighborUp(0)),
            (0, t, Event::NeighborUp(3)),
            (0, t, Event::NeighborDown(1)),
            (1, t, Event::NeighborDown(0)),
        ]);
        let expected2 = sort(vec![
            (3, t, Event::NeighborUp(0)),
            (0, t, Event::NeighborUp(3)),
            (0, t, Event::NeighborDown(2)),
            (2, t, Event::NeighborDown(0)),
        ]);
        assert!((actual == expected1) || (actual == expected2));
        // Confirm active connections.
        if actual == expected1 {
            assert_eq!(network.conns(), vec![(0, 2), (0, 3), (1, 2)]);
        } else {
            assert_eq!(network.conns(), vec![(0, 1), (0, 3), (1, 2)]);
        }
        assert!(network.check_synchronicity());
    }
    /// Broadcast smoke test: messages reach all reachable peers, including
    /// across a late bridge between two previously separate swarm sections.
    #[test]
    #[traced_test]
    fn plumtree_smoke() {
        let rng = ChaCha12Rng::seed_from_u64(read_var("SEED", 0));
        let network_config = NetworkConfig {
            proto: Config::default(),
            latency: LatencyConfig::default_static(),
        };
        let mut network = Network::new(network_config, rng);
        // build a network with 6 nodes
        for i in 0..6 {
            network.insert(i);
        }
        let t = [0u8; 32].into();
        // let node 0 join the topic but do not connect to any peers
        network.command(0, t, Command::Join(vec![]));
        // connect nodes 1 and 2 to node 0
        (1..3).for_each(|i| network.command(i, t, Command::Join(vec![0])));
        // connect nodes 4 and 5 to node 3
        network.command(3, t, Command::Join(vec![]));
        (4..6).for_each(|i| network.command(i, t, Command::Join(vec![3])));
        // run ticks and drain events
        network.run_trips(4);
        let _ = network.events();
        assert!(network.check_synchronicity());
        // now broadcast a first message
        network.command(
            1,
            t,
            Command::Broadcast(b"hi1".to_vec().into(), Scope::Swarm),
        );
        network.run_trips(4);
        let events = network.events();
        let received = events.filter(|x| matches!(x, (_, _, Event::Received(_))));
        // message should be received by two other nodes
        assert_eq!(received.count(), 2);
        assert!(network.check_synchronicity());
        // now connect the two sections of the swarm
        network.command(2, t, Command::Join(vec![5]));
        network.run_trips(3);
        let _ = network.events();
        println!("{}", network.report());
        // now broadcast again
        network.command(
            1,
            t,
            Command::Broadcast(b"hi2".to_vec().into(), Scope::Swarm),
        );
        network.run_trips(5);
        let events = network.events();
        let received = events.filter(|x| matches!(x, (_, _, Event::Received(_))));
        // message should be received by all 5 other nodes
        assert_eq!(received.count(), 5);
        assert!(network.check_synchronicity());
        println!("{}", network.report());
    }
    /// Leaving: a `Quit` command removes the node from all other peers' views.
    #[test]
    #[traced_test]
    fn quit() {
        // Create a network with 4 nodes and active_view_capacity 2
        let rng = ChaCha12Rng::seed_from_u64(read_var("SEED", 0));
        let mut config = Config::default();
        config.membership.active_view_capacity = 2;
        let mut network = Network::new(config.into(), rng);
        let num = 4;
        for i in 0..num {
            network.insert(i);
        }
        let t: TopicId = [0u8; 32].into();
        // join all nodes
        network.command(0, t, Command::Join(vec![]));
        network.command(1, t, Command::Join(vec![0]));
        network.command(2, t, Command::Join(vec![1]));
        network.command(3, t, Command::Join(vec![2]));
        network.run_trips(2);
        // assert all peers appear in the connections
        let all_conns: HashSet<u64> = HashSet::from_iter((0u64..4).flat_map(|p| {
            network
                .neighbors(&p, &t)
                .into_iter()
                .flat_map(|x| x.into_iter())
        }));
        assert_eq!(all_conns, HashSet::from_iter([0, 1, 2, 3]));
        assert!(network.check_synchronicity());
        // let node 3 leave the swarm
        network.command(3, t, Command::Quit);
        network.run_trips(4);
        assert!(network.peer(&3).unwrap().state(&t).is_none());
        // assert all peers without peer 3 appear in the connections
        let all_conns: HashSet<u64> = HashSet::from_iter((0..num).flat_map(|p| {
            network
                .neighbors(&p, &t)
                .into_iter()
                .flat_map(|x| x.into_iter())
        }));
        assert_eq!(all_conns, HashSet::from_iter([0, 1, 2]));
        assert!(network.check_synchronicity());
    }
    /// Reads a value from environment variable `name`, falling back to `default`.
    fn read_var<T: FromStr<Err: fmt::Display + fmt::Debug>>(name: &str, default: T) -> T {
        env::var(name)
            .map(|x| {
                x.parse()
                    .unwrap_or_else(|_| panic!("Failed to parse environment variable {name}"))
            })
            .unwrap_or(default)
    }
    /// Returns `items` sorted; helper for order-insensitive comparisons.
    fn sort<T: Ord + Clone>(items: Vec<T>) -> Vec<T> {
        let mut sorted = items;
        sorted.sort();
        sorted
    }
}

View file

@ -0,0 +1,764 @@
//! Implementation of the HyParView membership protocol
//!
//! The implementation is based on [this paper][paper] by João Leitão, José Pereira, Luís Rodrigues
//! and the [example implementation][impl] by Bartosz Sypytkowski
//!
//! [paper]: https://asc.di.fct.unl.pt/~jleitao/pdf/dsn07-leitao.pdf
//! [impl]: https://gist.github.com/Horusiath/84fac596101b197da0546d1697580d99
use std::collections::{HashMap, HashSet};
use derive_more::{From, Sub};
use n0_future::time::Duration;
use rand::{rngs::ThreadRng, Rng};
use serde::{Deserialize, Serialize};
use tracing::debug;
use super::{util::IndexSet, PeerData, PeerIdentity, PeerInfo, IO};
/// Input event for HyParView
#[derive(Debug)]
pub enum InEvent<PI> {
/// A [`Message`] was received from a peer.
RecvMessage(PI, Message<PI>),
/// A timer has expired.
TimerExpired(Timer<PI>),
/// A peer was disconnected on the IO layer.
PeerDisconnected(PI),
/// Send a join request to a peer.
RequestJoin(PI),
/// Update the peer data that is transmitted on join requests.
UpdatePeerData(PeerData),
/// Quit the swarm, informing peers about us leaving.
Quit,
}
/// Output event for HyParView
#[derive(Debug)]
pub enum OutEvent<PI> {
/// Ask the IO layer to send a [`Message`] to peer `PI`.
SendMessage(PI, Message<PI>),
/// Schedule a [`Timer`].
ScheduleTimer(Duration, Timer<PI>),
/// Ask the IO layer to close the connection to peer `PI`.
DisconnectPeer(PI),
/// Emit an [`Event`] to the application.
EmitEvent(Event<PI>),
/// New [`PeerData`] was received for peer `PI`.
PeerData(PI, PeerData),
}
/// Event emitted by the [`State`] to the application.
#[derive(Clone, Debug)]
pub enum Event<PI> {
/// A peer was added to our set of active connections.
NeighborUp(PI),
/// A peer was removed from our set of active connections.
NeighborDown(PI),
}
/// Kinds of timers HyParView needs to schedule.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Timer<PI> {
DoShuffle,
PendingNeighborRequest(PI),
}
/// Messages that we can send and receive from peers within the topic.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum Message<PI> {
    /// Sent to a peer if you want to join the swarm
    Join(Option<PeerData>),
    /// When receiving Join, ForwardJoin is forwarded to the peer's ActiveView to introduce the
    /// new member.
    ForwardJoin(ForwardJoin<PI>),
    /// A shuffle request is sent occasionally to re-shuffle the PassiveView with contacts from
    /// other peers.
    Shuffle(Shuffle<PI>),
    /// Peers reply to [`Message::Shuffle`] requests with random peers from their active and
    /// passive views.
    ShuffleReply(ShuffleReply<PI>),
    /// Request to add sender to an active view of recipient. If [`Neighbor::priority`] is
    /// [`Priority::High`], the request cannot be denied.
    Neighbor(Neighbor),
    /// Request to disconnect from a peer.
    /// If [`Disconnect::alive`] is true, the other peer is not shutting down, so it should be
    /// added to the passive set.
    Disconnect(Disconnect),
}
/// The time-to-live for this message.
///
/// Each time a message is forwarded, the `Ttl` is decreased by 1. If the `Ttl` reaches 0, it
/// should not be forwarded further.
#[derive(From, Sub, Eq, PartialEq, Clone, Debug, Copy, Serialize, Deserialize)]
pub struct Ttl(pub u16);
impl Ttl {
    /// Whether this TTL has reached zero and the message must not travel further.
    pub fn expired(&self) -> bool {
        self.0 == 0
    }
    /// The TTL for the next hop: one less, never going below zero.
    pub fn next(&self) -> Ttl {
        Ttl(self.0.saturating_sub(1))
    }
}
/// A message informing other peers that a new peer joined the swarm for this topic.
///
/// Will be forwarded in a random walk until `ttl` reaches 0.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct ForwardJoin<PI> {
    /// The peer that newly joined the swarm
    peer: PeerInfo<PI>,
    /// The time-to-live for this message
    ttl: Ttl,
}
/// Shuffle messages are sent occasionally to shuffle our passive view with peers from other peer's
/// active and passive views.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Shuffle<PI> {
    /// The peer that initiated the shuffle request.
    origin: PI,
    /// A random subset of the active and passive peers of the `origin` peer.
    nodes: Vec<PeerInfo<PI>>,
    /// The time-to-live for this message.
    ttl: Ttl,
}
/// Once a shuffle messages reaches a [`Ttl`] of 0, a peer replies with a `ShuffleReply`.
///
/// The reply is sent to the peer that initiated the shuffle and contains a subset of the active
/// and passive views of the peer at the end of the random walk.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct ShuffleReply<PI> {
    /// A random subset of the active and passive peers of the peer sending the `ShuffleReply`.
    nodes: Vec<PeerInfo<PI>>,
}
/// The priority of a `Join` message
///
/// This is `High` if the sender does not have any active peers, and `Low` otherwise.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum Priority {
    /// High priority join that may not be denied.
    ///
    /// A peer may only send high priority joins if it doesn't have any active peers at the moment.
    High,
    /// Low priority join that can be denied.
    Low,
}
/// A neighbor message is sent after adding a peer to our active view to inform them that we are
/// now neighbors.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Neighbor {
    /// The priority of the `Join` or `ForwardJoin` message that triggered this neighbor request.
    priority: Priority,
    /// The user data of the peer sending this message.
    data: Option<PeerData>,
}
/// Message sent when leaving the swarm or closing down to inform peers about us being gone.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Disconnect {
    /// Whether we are actually shutting down or closing the connection only because our limits are
    /// reached.
    alive: bool,
    /// Obsolete field (kept in the struct to maintain wire compatibility).
    _respond: bool,
}
/// Configuration for the swarm membership layer
///
/// All fields fall back to their defaults when missing during
/// deserialization (`#[serde(default)]`).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
    /// Number of peers to which active connections are maintained
    pub active_view_capacity: usize,
    /// Number of peers for which contact information is remembered,
    /// but to which we are not actively connected to.
    pub passive_view_capacity: usize,
    /// Number of hops a `ForwardJoin` message is propagated until the new peer's info
    /// is added to a peer's active view.
    pub active_random_walk_length: Ttl,
    /// Number of hops a `ForwardJoin` message is propagated until the new peer's info
    /// is added to a peer's passive view.
    pub passive_random_walk_length: Ttl,
    /// Number of hops a `Shuffle` message is propagated until a peer replies to it.
    pub shuffle_random_walk_length: Ttl,
    /// Number of active peers to be included in a `Shuffle` request.
    pub shuffle_active_view_count: usize,
    /// Number of passive peers to be included in a `Shuffle` request.
    pub shuffle_passive_view_count: usize,
    /// Interval duration for shuffle requests
    pub shuffle_interval: Duration,
    /// Timeout after which a `Neighbor` request is considered failed
    pub neighbor_request_timeout: Duration,
}
impl Default for Config {
    /// Default values for the HyParView layer.
    ///
    /// The view capacities, random-walk lengths and shuffle counts follow
    /// the recommendations from the HyParView paper (p9); the two durations
    /// are wild guesses.
    fn default() -> Self {
        Self {
            active_view_capacity: 5,
            passive_view_capacity: 30,
            active_random_walk_length: Ttl(6),
            passive_random_walk_length: Ttl(3),
            shuffle_random_walk_length: Ttl(6),
            shuffle_active_view_count: 3,
            shuffle_passive_view_count: 4,
            shuffle_interval: Duration::from_secs(60),
            neighbor_request_timeout: Duration::from_millis(500),
        }
    }
}
/// Counters collected by the HyParView state.
#[derive(Default, Debug, Clone)]
pub struct Stats {
    // Incremented for each message received from a peer that is not
    // currently in our active view.
    total_connections: usize,
}
/// The state of the HyParView protocol
///
/// Generic over the peer identity `PI` and the random number generator `RG`
/// (defaults to [`ThreadRng`]).
#[derive(Debug)]
pub struct State<PI, RG = ThreadRng> {
    /// Our peer identity
    me: PI,
    /// Our opaque user data to transmit to peers on join messages
    me_data: Option<PeerData>,
    /// The active view, i.e. peers we are connected to
    pub(crate) active_view: IndexSet<PI>,
    /// The passive view, i.e. peers we know about but are not connected to at the moment
    pub(crate) passive_view: IndexSet<PI>,
    /// Protocol configuration (cannot change at runtime)
    config: Config,
    /// Whether a shuffle timer is currently scheduled
    shuffle_scheduled: bool,
    /// Random number generator
    rng: RG,
    /// Statistics
    pub(crate) stats: Stats,
    /// The set of neighbor requests we sent out but did not yet receive a reply for
    pending_neighbor_requests: HashSet<PI>,
    /// The opaque user peer data we received for other peers
    peer_data: HashMap<PI, PeerData>,
    /// List of peers that are disconnecting, but which we want to keep in the passive set once the connection closes
    alive_disconnect_peers: HashSet<PI>,
}
impl<PI, RG> State<PI, RG>
where
PI: PeerIdentity,
RG: Rng,
{
    /// Creates a new HyParView state for the local peer `me`.
    ///
    /// `me_data` is our opaque user data sent along with join messages;
    /// `rng` drives all randomized peer selection.
    pub fn new(me: PI, me_data: Option<PeerData>, config: Config, rng: RG) -> Self {
        Self {
            me,
            me_data,
            active_view: IndexSet::new(),
            passive_view: IndexSet::new(),
            config,
            shuffle_scheduled: false,
            rng,
            stats: Stats::default(),
            pending_neighbor_requests: Default::default(),
            peer_data: Default::default(),
            alive_disconnect_peers: Default::default(),
        }
    }
    /// Handles an [`InEvent`], pushing any resulting output events into `io`.
    ///
    /// Also schedules the recurring shuffle timer on the first call.
    pub fn handle(&mut self, event: InEvent<PI>, io: &mut impl IO<PI>) {
        match event {
            InEvent::RecvMessage(from, message) => self.handle_message(from, message, io),
            InEvent::TimerExpired(timer) => match timer {
                Timer::DoShuffle => self.handle_shuffle_timer(io),
                Timer::PendingNeighborRequest(peer) => self.handle_pending_neighbor_timer(peer, io),
            },
            InEvent::PeerDisconnected(peer) => self.handle_connection_closed(peer, io),
            InEvent::RequestJoin(peer) => self.handle_join(peer, io),
            InEvent::UpdatePeerData(data) => {
                self.me_data = Some(data);
            }
            InEvent::Quit => self.handle_quit(io),
        }
        // this will only happen on the first call
        if !self.shuffle_scheduled {
            io.push(OutEvent::ScheduleTimer(
                self.config.shuffle_interval,
                Timer::DoShuffle,
            ));
            self.shuffle_scheduled = true;
        }
    }
    /// Dispatches a received protocol `message` to its handler and updates stats.
    fn handle_message(&mut self, from: PI, message: Message<PI>, io: &mut impl IO<PI>) {
        let is_disconnect = matches!(message, Message::Disconnect(Disconnect { .. }));
        // Count messages arriving from peers outside our active view.
        if !is_disconnect && !self.active_view.contains(&from) {
            self.stats.total_connections += 1;
        }
        match message {
            Message::Join(data) => self.on_join(from, data, io),
            Message::ForwardJoin(details) => self.on_forward_join(from, details, io),
            Message::Shuffle(details) => self.on_shuffle(from, details, io),
            Message::ShuffleReply(details) => self.on_shuffle_reply(details, io),
            Message::Neighbor(details) => self.on_neighbor(from, details, io),
            Message::Disconnect(details) => self.on_disconnect(from, details, io),
        }
        // Disconnect from passive nodes right after receiving a message.
        // TODO(frando): I'm not sure anymore that this is correct. Maybe remove?
        if !is_disconnect && !self.active_view.contains(&from) {
            io.push(OutEvent::DisconnectPeer(from));
        }
    }
fn handle_join(&mut self, peer: PI, io: &mut impl IO<PI>) {
io.push(OutEvent::SendMessage(
peer,
Message::Join(self.me_data.clone()),
));
}
/// We received a disconnect message.
fn on_disconnect(&mut self, peer: PI, details: Disconnect, io: &mut impl IO<PI>) {
self.pending_neighbor_requests.remove(&peer);
if self.active_view.contains(&peer) {
self.remove_active(
&peer,
RemovalReason::DisconnectReceived {
is_alive: details.alive,
},
io,
);
} else if details.alive && self.passive_view.contains(&peer) {
self.alive_disconnect_peers.insert(peer);
}
}
/// A connection was closed by the peer.
fn handle_connection_closed(&mut self, peer: PI, io: &mut impl IO<PI>) {
self.pending_neighbor_requests.remove(&peer);
if self.active_view.contains(&peer) {
self.remove_active(&peer, RemovalReason::ConnectionClosed, io);
} else if !self.alive_disconnect_peers.remove(&peer) {
self.passive_view.remove(&peer);
self.peer_data.remove(&peer);
}
}
fn handle_quit(&mut self, io: &mut impl IO<PI>) {
for peer in self.active_view.clone().into_iter() {
self.active_view.remove(&peer);
self.send_disconnect(peer, false, io);
}
}
fn send_disconnect(&mut self, peer: PI, alive: bool, io: &mut impl IO<PI>) {
// Before disconnecting, send a `ShuffleReply` with some of our nodes to
// prevent the other node from running out of connections. This is especially
// relevant if the other node just joined the swarm.
self.send_shuffle_reply(
peer,
self.config.shuffle_active_view_count + self.config.shuffle_passive_view_count,
io,
);
let message = Message::Disconnect(Disconnect {
alive,
_respond: false,
});
io.push(OutEvent::SendMessage(peer, message));
io.push(OutEvent::DisconnectPeer(peer));
}
fn on_join(&mut self, peer: PI, data: Option<PeerData>, io: &mut impl IO<PI>) {
// "A node that receives a join request will start by adding the new
// node to its active view, even if it has to drop a random node from it. (6)"
self.add_active(peer, data.clone(), Priority::High, true, io);
// "The contact node c will then send to all other nodes in its active view a ForwardJoin
// request containing the new node identifier. Associated to the join procedure,
// there are two configuration parameters, named Active Random Walk Length (ARWL),
// that specifies the maximum number of hops a ForwardJoin request is propagated,
// and Passive Random Walk Length (PRWL), that specifies at which point in the walk the node
// is inserted in a passive view. To use these parameters, the ForwardJoin request carries
// a “time to live” field that is initially set to ARWL and decreased at every hop. (7)"
let ttl = self.config.active_random_walk_length;
let peer_info = PeerInfo { id: peer, data };
for node in self.active_view.iter_without(&peer) {
let message = Message::ForwardJoin(ForwardJoin {
peer: peer_info.clone(),
ttl,
});
io.push(OutEvent::SendMessage(*node, message));
}
}
fn on_forward_join(&mut self, sender: PI, message: ForwardJoin<PI>, io: &mut impl IO<PI>) {
let peer_id = message.peer.id;
// If the peer is already in our active view, we renew our neighbor relationship.
if self.active_view.contains(&peer_id) {
self.insert_peer_info(message.peer, io);
self.send_neighbor(peer_id, Priority::High, io);
}
// "i) If the time to live is equal to zero or if the number of nodes in ps active view is equal to one,
// it will add the new node to its active view (7)"
else if message.ttl.expired() || self.active_view.len() <= 1 {
self.insert_peer_info(message.peer, io);
// Modification from paper: Instead of adding the peer directly to our active view,
// we only send the Neighbor message. We will add the peer to our active view once we receive a
// reply from our neighbor.
// This prevents us adding unreachable peers to our active view.
self.send_neighbor(peer_id, Priority::High, io);
} else {
// "ii) If the time to live is equal to PRWL, p will insert the new node into its passive view"
if message.ttl == self.config.passive_random_walk_length {
self.add_passive(peer_id, message.peer.data.clone(), io);
}
// "iii) The time to live field is decremented."
// "iv) If, at this point, n has not been inserted
// in ps active view, p will forward the request to a random node in its active view
// (different from the one from which the request was received)."
if !self.active_view.contains(&peer_id)
&& !self.pending_neighbor_requests.contains(&peer_id)
{
match self
.active_view
.pick_random_without(&[&sender], &mut self.rng)
{
None => {
unreachable!("if the peer was not added, there are at least two peers in our active view.");
}
Some(next) => {
let message = Message::ForwardJoin(ForwardJoin {
peer: message.peer,
ttl: message.ttl.next(),
});
io.push(OutEvent::SendMessage(*next, message));
}
}
}
}
}
fn on_neighbor(&mut self, from: PI, details: Neighbor, io: &mut impl IO<PI>) {
let is_reply = self.pending_neighbor_requests.remove(&from);
let do_reply = !is_reply;
// "A node q that receives a high priority neighbor request will always accept the request, even
// if it has to drop a random member from its active view (again, the member that is dropped will
// receive a Disconnect notification). If a node q receives a low priority Neighbor request, it will
// only accept the request if it has a free slot in its active view, otherwise it will refuse the request."
if !self.add_active(from, details.data, details.priority, do_reply, io) {
self.send_disconnect(from, true, io);
}
}
/// Get the peer [`PeerInfo`] for a peer.
fn peer_info(&self, id: &PI) -> PeerInfo<PI> {
let data = self.peer_data.get(id).cloned();
PeerInfo { id: *id, data }
}
fn insert_peer_info(&mut self, peer_info: PeerInfo<PI>, io: &mut impl IO<PI>) {
if let Some(data) = peer_info.data {
let old = self.peer_data.remove(&peer_info.id);
let same = matches!(old, Some(old) if old == data);
if !same && !data.0.is_empty() {
io.push(OutEvent::PeerData(peer_info.id, data.clone()));
}
self.peer_data.insert(peer_info.id, data);
}
}
/// Handle a [`Message::Shuffle`]
///
/// > A node q that receives a Shuffle request will first decrease its time to live. If the time
/// > to live of the message is greater than zero and the number of nodes in qs active view is
/// > greater than 1, the node will select a random node from its active view, different from the
/// > one he received this shuffle message from, and simply forwards the Shuffle request.
/// > Otherwise, node q accepts the Shuffle request and send back (p.8)
fn on_shuffle(&mut self, from: PI, shuffle: Shuffle<PI>, io: &mut impl IO<PI>) {
if shuffle.ttl.expired() || self.active_view.len() <= 1 {
let len = shuffle.nodes.len();
for node in shuffle.nodes {
self.add_passive(node.id, node.data, io);
}
self.send_shuffle_reply(shuffle.origin, len, io);
} else if let Some(node) = self
.active_view
.pick_random_without(&[&shuffle.origin, &from], &mut self.rng)
{
let message = Message::Shuffle(Shuffle {
origin: shuffle.origin,
nodes: shuffle.nodes,
ttl: shuffle.ttl.next(),
});
io.push(OutEvent::SendMessage(*node, message));
}
}
fn send_shuffle_reply(&mut self, to: PI, len: usize, io: &mut impl IO<PI>) {
let mut nodes = self.passive_view.shuffled_and_capped(len, &mut self.rng);
// If we don't have enough passive nodes for the expected length, we fill with
// active nodes.
if nodes.len() < len {
nodes.extend(
self.active_view
.shuffled_and_capped(len - nodes.len(), &mut self.rng),
);
}
let nodes = nodes.into_iter().map(|id| self.peer_info(&id));
let message = Message::ShuffleReply(ShuffleReply {
nodes: nodes.collect(),
});
io.push(OutEvent::SendMessage(to, message));
}
fn on_shuffle_reply(&mut self, message: ShuffleReply<PI>, io: &mut impl IO<PI>) {
for node in message.nodes {
self.add_passive(node.id, node.data, io);
}
self.refill_active_from_passive(&[], io);
}
fn handle_shuffle_timer(&mut self, io: &mut impl IO<PI>) {
if let Some(node) = self.active_view.pick_random(&mut self.rng) {
let active = self.active_view.shuffled_without_and_capped(
&[node],
self.config.shuffle_active_view_count,
&mut self.rng,
);
let passive = self.passive_view.shuffled_without_and_capped(
&[node],
self.config.shuffle_passive_view_count,
&mut self.rng,
);
let nodes = active
.iter()
.chain(passive.iter())
.map(|id| self.peer_info(id));
let me = PeerInfo {
id: self.me,
data: self.me_data.clone(),
};
let nodes = nodes.chain([me]);
let message = Shuffle {
origin: self.me,
nodes: nodes.collect(),
ttl: self.config.shuffle_random_walk_length,
};
io.push(OutEvent::SendMessage(*node, Message::Shuffle(message)));
}
io.push(OutEvent::ScheduleTimer(
self.config.shuffle_interval,
Timer::DoShuffle,
));
}
fn passive_is_full(&self) -> bool {
self.passive_view.len() >= self.config.passive_view_capacity
}
fn active_is_full(&self) -> bool {
self.active_view.len() >= self.config.active_view_capacity
}
/// Add a peer to the passive view.
///
/// If the passive view is full, it will first remove a random peer and then insert the new peer.
/// If a peer is currently in the active view it will not be added.
fn add_passive(&mut self, peer: PI, data: Option<PeerData>, io: &mut impl IO<PI>) {
self.insert_peer_info((peer, data).into(), io);
if self.active_view.contains(&peer) || self.passive_view.contains(&peer) || peer == self.me
{
return;
}
if self.passive_is_full() {
self.passive_view.remove_random(&mut self.rng);
}
self.passive_view.insert(peer);
}
/// Remove a peer from the active view.
///
/// If `reason` is [`RemovalReason::Random`], a [`Disconnect`] message will be sent to the peer.
fn remove_active(&mut self, peer: &PI, reason: RemovalReason, io: &mut impl IO<PI>) {
if let Some(idx) = self.active_view.get_index_of(peer) {
let removed_peer = self.remove_active_by_index(idx, reason, io).unwrap();
self.refill_active_from_passive(&[&removed_peer], io);
}
}
fn refill_active_from_passive(&mut self, skip_peers: &[&PI], io: &mut impl IO<PI>) {
if self.active_view.len() + self.pending_neighbor_requests.len()
>= self.config.active_view_capacity
{
return;
}
// "When a node p suspects that one of the nodes present in its active view has failed
// (by either disconnecting or blocking), it selects a random node q from its passive view and
// attempts to establish a TCP connection with q. If the connection fails to establish,
// node q is considered failed and removed from ps passive view; another node q is selected
// at random and a new attempt is made. The procedure is repeated until a connection is established
// with success." (p7)
let mut skip_peers = skip_peers.to_vec();
skip_peers.extend(self.pending_neighbor_requests.iter());
if let Some(node) = self
.passive_view
.pick_random_without(&skip_peers, &mut self.rng)
.copied()
{
let priority = match self.active_view.is_empty() {
true => Priority::High,
false => Priority::Low,
};
self.send_neighbor(node, priority, io);
// schedule a timer that checks if the node replied with a neighbor message,
// otherwise try again with another passive node.
io.push(OutEvent::ScheduleTimer(
self.config.neighbor_request_timeout,
Timer::PendingNeighborRequest(node),
));
};
}
fn handle_pending_neighbor_timer(&mut self, peer: PI, io: &mut impl IO<PI>) {
if self.pending_neighbor_requests.remove(&peer) {
self.passive_view.remove(&peer);
self.refill_active_from_passive(&[], io);
}
}
fn remove_active_by_index(
&mut self,
peer_index: usize,
reason: RemovalReason,
io: &mut impl IO<PI>,
) -> Option<PI> {
if let Some(peer) = self.active_view.remove_index(peer_index) {
io.push(OutEvent::EmitEvent(Event::NeighborDown(peer)));
match reason {
// send a disconnect message, then close connection.
RemovalReason::Random => self.send_disconnect(peer, true, io),
// close connection without sending anything further.
RemovalReason::DisconnectReceived { is_alive: _ } => {
io.push(OutEvent::DisconnectPeer(peer))
}
RemovalReason::ConnectionClosed => io.push(OutEvent::DisconnectPeer(peer)),
}
let keep_as_passive = match reason {
// keep alive if previously marked as alive.
RemovalReason::ConnectionClosed => self.alive_disconnect_peers.remove(&peer),
// keep alive if other peer said to be still alive.
RemovalReason::DisconnectReceived { is_alive } => is_alive,
// keep alive (only we are removing for now)
RemovalReason::Random => true,
};
if keep_as_passive {
let data = self.peer_data.remove(&peer);
self.add_passive(peer, data, io);
// mark peer as alive, so it doesn't get removed from the passive view if the conn closes.
if !matches!(reason, RemovalReason::ConnectionClosed) {
self.alive_disconnect_peers.insert(peer);
}
}
debug!(other = ?peer, "removed from active view, reason: {reason:?}");
Some(peer)
} else {
None
}
}
/// Remove a random peer from the active view.
fn free_random_slot_in_active_view(&mut self, io: &mut impl IO<PI>) {
if let Some(index) = self.active_view.pick_random_index(&mut self.rng) {
self.remove_active_by_index(index, RemovalReason::Random, io);
}
}
/// Add a peer to the active view.
///
/// If the active view is currently full, a random peer will be removed first.
/// Sends a Neighbor message to the peer. If high_priority is true, the peer
/// may not deny the Neighbor request.
fn add_active(
&mut self,
peer: PI,
data: Option<PeerData>,
priority: Priority,
reply: bool,
io: &mut impl IO<PI>,
) -> bool {
if peer == self.me {
return false;
}
self.insert_peer_info((peer, data).into(), io);
if self.active_view.contains(&peer) {
if reply {
self.send_neighbor(peer, priority, io);
}
return true;
}
match (priority, self.active_is_full()) {
(Priority::High, is_full) => {
if is_full {
self.free_random_slot_in_active_view(io);
}
self.add_active_unchecked(peer, Priority::High, reply, io);
true
}
(Priority::Low, false) => {
self.add_active_unchecked(peer, Priority::Low, reply, io);
true
}
(Priority::Low, true) => false,
}
}
fn add_active_unchecked(
&mut self,
peer: PI,
priority: Priority,
reply: bool,
io: &mut impl IO<PI>,
) {
self.passive_view.remove(&peer);
if self.active_view.insert(peer) {
debug!(other = ?peer, "add to active view");
io.push(OutEvent::EmitEvent(Event::NeighborUp(peer)));
if reply {
self.send_neighbor(peer, priority, io);
}
}
}
fn send_neighbor(&mut self, peer: PI, priority: Priority, io: &mut impl IO<PI>) {
if self.pending_neighbor_requests.insert(peer) {
let message = Message::Neighbor(Neighbor {
priority,
data: self.me_data.clone(),
});
io.push(OutEvent::SendMessage(peer, message));
}
}
}
/// Why a peer is being removed from the active view.
///
/// The reason determines whether a `Disconnect` message is sent and whether the
/// peer is demoted to the passive view (see `State::remove_active_by_index`).
#[derive(Debug)]
enum RemovalReason {
    /// A peer is removed because the connection was closed ungracefully.
    ConnectionClosed,
    /// A peer is removed because we received a disconnect message.
    ///
    /// `is_alive` carries the peer's own claim of whether it stays reachable.
    DisconnectReceived { is_alive: bool },
    /// A peer is removed after random selection to make room for a newly joined peer.
    Random,
}

View file

@ -0,0 +1,909 @@
//! Implementation of the Plumtree epidemic broadcast tree protocol
//!
//! The implementation is based on [this paper][paper] by Joao Leitao, Jose Pereira, Luıs Rodrigues
//! and the [example implementation][impl] by Bartosz Sypytkowski
//!
//! [paper]: https://asc.di.fct.unl.pt/~jleitao/pdf/srds07-leitao.pdf
//! [impl]: https://gist.github.com/Horusiath/84fac596101b197da0546d1697580d99
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque},
hash::Hash,
};
use bytes::Bytes;
use derive_more::{Add, From, Sub};
use n0_future::time::{Duration, Instant};
use postcard::experimental::max_size::MaxSize;
use serde::{Deserialize, Serialize};
use tracing::{debug, warn};
use super::{
util::{idbytes_impls, TimeBoundCache},
PeerIdentity, IO,
};
/// A message identifier, which is the message content's blake3 hash.
///
/// Used to identify and deduplicate messages without transmitting their payload
/// (see [`IHave`]). Computed via [`MessageId::from_content`].
#[derive(Serialize, Deserialize, Clone, Hash, Copy, PartialEq, Eq, MaxSize)]
pub struct MessageId([u8; 32]);
idbytes_impls!(MessageId, "MessageId");
impl MessageId {
    /// Create a [`MessageId`] by hashing the message content.
    ///
    /// The id is the [`blake3::hash`] of the message bytes, so identical
    /// payloads always map to the same id.
    pub fn from_content(message: &[u8]) -> Self {
        let digest = blake3::hash(message);
        digest.into()
    }
}
/// Events Plumtree is informed of from the peer sampling service and IO layer.
#[derive(Debug)]
pub enum InEvent<PI> {
    /// A [`Message`] was received from the peer.
    RecvMessage(PI, Message),
    /// Broadcast the contained payload to the given scope.
    Broadcast(Bytes, Scope),
    /// A timer has expired.
    TimerExpired(Timer),
    /// New member `PI` has joined the topic.
    ///
    /// New members start out as eager peers.
    NeighborUp(PI),
    /// Peer `PI` has disconnected from the topic.
    NeighborDown(PI),
}
/// Events Plumtree emits.
///
/// These are instructions to the IO layer and notifications for the application;
/// the state machine itself performs no IO.
#[derive(Debug, PartialEq, Eq)]
pub enum OutEvent<PI> {
    /// Ask the IO layer to send a [`Message`] to peer `PI`.
    SendMessage(PI, Message),
    /// Schedule a [`Timer`].
    ScheduleTimer(Duration, Timer),
    /// Emit an [`Event`] to the application.
    EmitEvent(Event<PI>),
}
/// Kinds of timers Plumtree needs to schedule.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Timer {
    /// Request the content for [`MessageId`] by sending [`Message::Graft`].
    ///
    /// The message will be sent to a peer that sent us an [`Message::IHave`] for this [`MessageId`],
    /// which will send us the message content in reply and also move the peer into the eager set.
    /// Will be a no-op if the message for [`MessageId`] was already received from another peer by now.
    SendGraft(MessageId),
    /// Dispatch the [`Message::IHave`] in our lazy push queue.
    DispatchLazyPush,
    /// Evict the message cache
    ///
    /// Periodically removes expired entries (see [`Config::cache_evict_interval`]).
    EvictCache,
}
/// Event emitted by the [`State`] to the application.
///
/// Currently only message delivery is reported.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Event<PI> {
    /// A new gossip message was received.
    Received(GossipEvent<PI>),
}
/// A gossip message as delivered to the application.
#[derive(Clone, derive_more::Debug, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)]
pub struct GossipEvent<PI> {
    /// The content of the gossip message.
    #[debug("<{}b>", content.len())]
    pub content: Bytes,
    /// The peer that we received the gossip message from. Note that this is not the peer that
    /// originally broadcasted the message, but the peer before us in the gossiping path.
    pub delivered_from: PI,
    /// The broadcast scope of the message.
    pub scope: DeliveryScope,
}
impl<PI> GossipEvent<PI> {
fn from_message(message: &Gossip, from: PI) -> Self {
Self {
content: message.content.clone(),
scope: message.scope,
delivered_from: from,
}
}
}
/// Number of delivery hops a message has taken.
///
/// Starts at `0` at the original broadcaster and is increased by one on every
/// forwarding hop (see [`Gossip::next_round`]).
#[derive(
    From,
    Add,
    Sub,
    Serialize,
    Deserialize,
    Eq,
    PartialEq,
    PartialOrd,
    Ord,
    Clone,
    Copy,
    Debug,
    Hash,
    MaxSize,
)]
pub struct Round(u16);
impl Round {
    /// Returns the round after this one.
    ///
    /// Uses a saturating increment: the round travels inside `IHave`/`Gossip`
    /// messages and is thus remote-controlled, so a plain `self.0 + 1` would
    /// panic in debug builds (and silently wrap to 0 in release builds) if a
    /// peer ever sends `u16::MAX`. Saturating keeps the value monotonic and
    /// panic-free in that edge case.
    pub fn next(&self) -> Round {
        Round(self.0.saturating_add(1))
    }
}
/// Messages that we can send and receive from peers within the topic.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum Message {
    /// When receiving Gossip, emit as event and forward full message to eager peer and (after a
    /// delay) message IDs to lazy peers.
    Gossip(Gossip),
    /// When receiving Prune, move the peer from the eager to the lazy set.
    Prune,
    /// When receiving Graft, move the peer to the eager set and send the full content for the
    /// included message ID.
    Graft(Graft),
    /// When receiving IHave, do nothing initially, and request the messages for the included
    /// message IDs after some time if they aren't pushed eagerly to us.
    ///
    /// Batched: one wire message can carry multiple announcements.
    IHave(Vec<IHave>),
}
/// Payload messages transmitted by the protocol.
#[derive(Serialize, Deserialize, Clone, derive_more::Debug, PartialEq, Eq)]
pub struct Gossip {
    /// Id of the message.
    ///
    /// Must equal the blake3 hash of `content` (checked by [`Gossip::validate`]).
    id: MessageId,
    /// Message contents.
    #[debug("<{}b>", content.len())]
    content: Bytes,
    /// Scope to broadcast to.
    scope: DeliveryScope,
}
impl Gossip {
    /// The delivery round of this message, or `None` if it is neighbor-scoped
    /// and therefore carries no round.
    fn round(&self) -> Option<Round> {
        if let DeliveryScope::Swarm(round) = self.scope {
            Some(round)
        } else {
            None
        }
    }
}
/// The scope to deliver the message to.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Copy)]
pub enum DeliveryScope {
    /// This message was received from the swarm, with a distance (in hops) travelled from the
    /// original broadcaster.
    Swarm(Round),
    /// This message was received from a direct neighbor that broadcasted the message to neighbors
    /// only.
    Neighbors,
}
impl DeliveryScope {
/// Whether this message was directly received from its publisher.
pub fn is_direct(&self) -> bool {
matches!(self, Self::Neighbors | Self::Swarm(Round(0)))
}
}
/// The broadcast scope of a gossip message.
///
/// Chosen by the application when broadcasting; mapped to a [`DeliveryScope`]
/// on the wire.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Copy)]
pub enum Scope {
    /// The message is broadcast to all peers in the swarm.
    Swarm,
    /// The message is broadcast only to the immediate neighbors of a peer.
    Neighbors,
}
impl Gossip {
    /// Get a clone of this `Gossip` message and increase the delivery round by 1.
    ///
    /// Returns `None` for neighbor-scoped messages, which are never forwarded
    /// and therefore carry no round.
    pub fn next_round(&self) -> Option<Gossip> {
        match self.scope {
            DeliveryScope::Neighbors => None,
            DeliveryScope::Swarm(round) => {
                let scope = DeliveryScope::Swarm(round.next());
                Some(Gossip {
                    id: self.id,
                    content: self.content.clone(),
                    scope,
                })
            }
        }
    }
    /// Validate that the message id is the blake3 hash of the message content.
    pub fn validate(&self) -> bool {
        self.id == MessageId::from_content(&self.content)
    }
}
/// Control message to inform peers we have a message without transmitting the whole payload.
///
/// Sent to lazy peers; they may request the payload later via [`Message::Graft`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, MaxSize)]
pub struct IHave {
    /// Id of the message.
    pub(crate) id: MessageId,
    /// Delivery round of the message.
    pub(crate) round: Round,
}
/// Control message to signal a peer that they have been moved to the eager set, and to ask the
/// peer to do the same with this node.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Graft {
    /// Message id that triggers the graft, if any.
    /// On receiving a graft, the payload message must be sent in reply if a message id is set.
    ///
    /// `None` is used for the tree-optimization graft, which requests no payload.
    id: Option<MessageId>,
    /// Delivery round of the [`Message::IHave`] that triggered this Graft message.
    round: Round,
}
/// Configuration for the gossip broadcast layer.
///
/// Currently, the expectation is that the configuration is the same for all peers in the
/// network (as recommended in the paper).
///
/// Fields missing during deserialization fall back to their [`Default`] values
/// (via `#[serde(default)]`).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
    /// When receiving an `IHave` message, this timeout is registered. If the message for the
    /// `IHave` was not received once the timeout is expired, a `Graft` message is sent to the
    /// peer that sent us the `IHave` to request the message payload.
    ///
    /// The plumtree paper notes:
    /// > The timeout value is a protocol parameter that should be configured considering the
    /// > diameter of the overlay and a target maximum recovery latency, defined by the application
    /// > requirements. (p.8)
    pub graft_timeout_1: Duration,
    /// This timeout is registered when sending a `Graft` message. If a reply has not been
    /// received once the timeout expires, we send another `Graft` message to the next peer that
    /// sent us an `IHave` for this message.
    ///
    /// The plumtree paper notes:
    /// > This second timeout value should be smaller that the first, in the order of an average
    /// > round trip time to a neighbor.
    pub graft_timeout_2: Duration,
    /// Timeout after which `IHave` messages are pushed to peers.
    ///
    /// `IHave`s queued within this window are batched into fewer wire messages.
    pub dispatch_timeout: Duration,
    /// The protocol performs a tree optimization, which promotes lazy peers to eager peers if the
    /// `Message::IHave` messages received from them have a lower number of hops from the
    /// message's origin as the `InEvent::Broadcast` messages received from our eager peers. This
    /// parameter is the number of hops that the lazy peers must be closer to the origin than our
    /// eager peers to be promoted to become an eager peer.
    pub optimization_threshold: Round,
    /// Duration for which to keep gossip messages in the internal message cache.
    ///
    /// Messages broadcast from this node or received from other nodes are kept in an internal
    /// cache for this duration before being evicted. If this is too low, other nodes will not be
    /// able to retrieve messages once they need them. If this is high, the cache will grow.
    ///
    /// Should be at least around several round trip times to peers.
    pub message_cache_retention: Duration,
    /// Duration for which to keep the `MessageId`s for received messages.
    ///
    /// Should be at least as long as [`Self::message_cache_retention`], usually will be longer to
    /// not accidentally receive messages multiple times.
    pub message_id_retention: Duration,
    /// How often the internal caches will be checked for expired items.
    pub cache_evict_interval: Duration,
}
impl Default for Config {
    /// Sensible defaults for the plumtree configuration
    //
    // TODO: Find out what good defaults are for the three timeouts here. Current numbers are
    // guesses that need validation. The paper does not have concrete recommendations for these
    // numbers.
    fn default() -> Self {
        Self {
            // Paper: "The timeout value is a protocol parameter that should be configured considering
            // the diameter of the overlay and a target maximum recovery latency, defined by the
            // application requirements. This is a parameter that should be statically configured
            // at deployment time." (p. 8)
            //
            // Earthstar has 5ms it seems, see https://github.com/earthstar-project/earthstar/blob/1523c640fedf106f598bf79b184fb0ada64b1cc0/src/syncer/plum_tree.ts#L75
            // However in the paper it is more like a few roundtrips if I read things correctly.
            graft_timeout_1: Duration::from_millis(80),
            // Paper: "This second timeout value should be smaller that the first, in the order of an
            // average round trip time to a neighbor." (p. 9)
            //
            // Earthstar doesn't have this step from my reading.
            graft_timeout_2: Duration::from_millis(40),
            // Again, paper does not tell a recommended number here. Likely should be quite small,
            // as to not delay messages without need. This would also be the time frame in which
            // `IHave`s are aggregated to save on packets.
            //
            // Eartstar dispatches immediately from my reading.
            dispatch_timeout: Duration::from_millis(5),
            // This number comes from experiment settings the plumtree paper (p. 12)
            optimization_threshold: Round(7),
            // This is a certainly-high-enough value for usual operation.
            message_cache_retention: Duration::from_secs(30),
            // Keep ids longer than payloads to avoid re-accepting a message right
            // after its payload was evicted from the cache.
            message_id_retention: Duration::from_secs(90),
            cache_evict_interval: Duration::from_secs(1),
        }
    }
}
/// Stats about this topic's plumtree.
///
/// Counters are cumulative and only ever increase while the state is alive.
#[derive(Debug, Default, Clone)]
pub struct Stats {
    /// Number of payload messages received so far.
    ///
    /// See [`Message::Gossip`].
    pub payload_messages_received: u64,
    /// Number of control messages received so far.
    ///
    /// See [`Message::Prune`], [`Message::Graft`], [`Message::IHave`].
    pub control_messages_received: u64,
    /// Max round seen so far.
    ///
    /// The highest delivery-hop count observed on any received swarm message.
    pub max_last_delivery_hop: u16,
}
/// State of the plumtree.
#[derive(Debug)]
pub struct State<PI> {
    /// Our address.
    me: PI,
    /// Configuration for this plumtree.
    config: Config,
    /// Set of peers used for payload exchange.
    pub(crate) eager_push_peers: BTreeSet<PI>,
    /// Set of peers used for control message exchange.
    pub(crate) lazy_push_peers: BTreeSet<PI>,
    /// Per-peer queue of [`IHave`]s waiting to be flushed on [`Timer::DispatchLazyPush`].
    lazy_push_queue: BTreeMap<PI, Vec<IHave>>,
    /// Messages for which a [`MessageId`] has been seen via a [`Message::IHave`] but we have not
    /// yet received the full payload. For each, we store the peers that have claimed to have this
    /// message.
    missing_messages: HashMap<MessageId, VecDeque<(PI, Round)>>,
    /// Messages for which the full payload has been seen.
    received_messages: TimeBoundCache<MessageId, ()>,
    /// Payloads of received messages.
    cache: TimeBoundCache<MessageId, Gossip>,
    /// Message ids for which a [`Timer::SendGraft`] has been scheduled.
    graft_timer_scheduled: HashSet<MessageId>,
    /// Whether a [`Timer::DispatchLazyPush`] has been scheduled.
    dispatch_timer_scheduled: bool,
    /// Set to false after the first message is received. Used for initial timer scheduling.
    init: bool,
    /// [`Stats`] of this plumtree.
    pub(crate) stats: Stats,
    /// Maximum size of a serialized message; used to chunk outgoing `IHave` batches.
    max_message_size: usize,
}
impl<PI: PeerIdentity> State<PI> {
/// Initialize the [`State`] of a plumtree.
pub fn new(me: PI, config: Config, max_message_size: usize) -> Self {
Self {
me,
eager_push_peers: Default::default(),
lazy_push_peers: Default::default(),
lazy_push_queue: Default::default(),
config,
missing_messages: Default::default(),
received_messages: Default::default(),
graft_timer_scheduled: Default::default(),
dispatch_timer_scheduled: false,
cache: Default::default(),
init: false,
stats: Default::default(),
max_message_size,
}
}
/// Handle an [`InEvent`].
pub fn handle(&mut self, event: InEvent<PI>, now: Instant, io: &mut impl IO<PI>) {
if !self.init {
self.init = true;
self.on_evict_cache_timer(now, io)
}
match event {
InEvent::RecvMessage(from, message) => self.handle_message(from, message, now, io),
InEvent::Broadcast(data, scope) => self.broadcast(data, scope, now, io),
InEvent::NeighborUp(peer) => self.on_neighbor_up(peer),
InEvent::NeighborDown(peer) => self.on_neighbor_down(peer),
InEvent::TimerExpired(timer) => match timer {
Timer::DispatchLazyPush => self.on_dispatch_timer(io),
Timer::SendGraft(id) => {
self.on_send_graft_timer(id, io);
}
Timer::EvictCache => self.on_evict_cache_timer(now, io),
},
}
}
    /// Get access to the [`Stats`] of the plumtree.
    ///
    /// Read-only view; the counters are updated internally as messages are handled.
    pub fn stats(&self) -> &Stats {
        &self.stats
    }
/// Handle receiving a [`Message`].
fn handle_message(&mut self, sender: PI, message: Message, now: Instant, io: &mut impl IO<PI>) {
if matches!(message, Message::Gossip(_)) {
self.stats.payload_messages_received += 1;
} else {
self.stats.control_messages_received += 1;
}
match message {
Message::Gossip(details) => self.on_gossip(sender, details, now, io),
Message::Prune => self.on_prune(sender),
Message::IHave(details) => self.on_ihave(sender, details, io),
Message::Graft(details) => self.on_graft(sender, details, io),
}
}
    /// Dispatches messages from lazy queue over to lazy peers.
    ///
    /// Drains the whole lazy push queue, sending each peer its pending `IHave`s
    /// in batches sized to fit into a single message of `max_message_size` bytes.
    fn on_dispatch_timer(&mut self, io: &mut impl IO<PI>) {
        // Serialized space available for the IHave list itself.
        let chunk_size = self.max_message_size
            // Space for discriminator
            - 1
            // Space for length prefix
            - 2;
        // Number of IHave entries guaranteed to fit into one message.
        let chunk_len = chunk_size / IHave::POSTCARD_MAX_SIZE;
        while let Some((peer, list)) = self.lazy_push_queue.pop_first() {
            for chunk in list.chunks(chunk_len) {
                io.push(OutEvent::SendMessage(peer, Message::IHave(chunk.to_vec())));
            }
        }
        // The queue is drained, so a new dispatch timer may be scheduled again.
        self.dispatch_timer_scheduled = false;
    }
    /// Send a gossip message.
    ///
    /// Will be pushed in full to eager peers.
    /// Pushing the message id to the lazy peers is delayed by a timer.
    fn broadcast(&mut self, content: Bytes, scope: Scope, now: Instant, io: &mut impl IO<PI>) {
        let id = MessageId::from_content(&content);
        // Neighbor broadcasts carry no round; swarm broadcasts start at round 0.
        let scope = match scope {
            Scope::Neighbors => DeliveryScope::Neighbors,
            Scope::Swarm => DeliveryScope::Swarm(Round(0)),
        };
        let message = Gossip { id, content, scope };
        let me = self.me;
        if let DeliveryScope::Swarm(_) = scope {
            // Mark our own message as received so an echo from a peer is pruned,
            // and cache the payload so we can answer Graft requests for it.
            self.received_messages
                .insert(id, (), now + self.config.message_id_retention);
            self.cache.insert(
                id,
                message.clone(),
                now + self.config.message_cache_retention,
            );
            self.lazy_push(message.clone(), &me, io);
        }
        self.eager_push(message.clone(), &me, io);
    }
    /// Handle receiving a [`Message::Gossip`].
    ///
    /// Validates the message id, delivers the payload to the application,
    /// forwards it to our peers, and prunes the sender if it was a duplicate.
    fn on_gossip(&mut self, sender: PI, message: Gossip, now: Instant, io: &mut impl IO<PI>) {
        // Validate that the message id is the blake3 hash of the message content.
        if !message.validate() {
            // TODO: Do we want to take any measures against the sender if we received a message
            // with a spoofed message id?
            warn!(
                peer = ?sender,
                "Received a message with spoofed message id ({})", message.id
            );
            return;
        }
        // if we already received this message: move peer to lazy set
        // and notify peer about this.
        if self.received_messages.contains_key(&message.id) {
            self.add_lazy(sender);
            io.push(OutEvent::SendMessage(sender, Message::Prune));
            // otherwise store the message, emit to application and forward to peers
        } else {
            if let DeliveryScope::Swarm(prev_round) = message.scope {
                // insert the message in the list of received messages
                self.received_messages.insert(
                    message.id,
                    (),
                    now + self.config.message_id_retention,
                );
                // increase the round for forwarding the message, and add to cache
                // to reply to Graft messages later
                // TODO: add callback/event to application to get missing messages that were received before?
                let message = message.next_round().expect("just checked");
                self.cache.insert(
                    message.id,
                    message.clone(),
                    now + self.config.message_cache_retention,
                );
                // push the message to our peers
                self.eager_push(message.clone(), &sender, io);
                self.lazy_push(message.clone(), &sender, io);
                // cleanup places where we track missing messages
                self.graft_timer_scheduled.remove(&message.id);
                let previous_ihaves = self.missing_messages.remove(&message.id);
                // do the optimization step from the paper
                if let Some(previous_ihaves) = previous_ihaves {
                    self.optimize_tree(&sender, &message, previous_ihaves, io);
                }
                // track the highest delivery-hop count seen so far
                self.stats.max_last_delivery_hop =
                    self.stats.max_last_delivery_hop.max(prev_round.0);
            }
            // emit event to application
            io.push(OutEvent::EmitEvent(Event::Received(
                GossipEvent::from_message(&message, sender),
            )));
        }
    }
/// Optimize the tree by pruning the `sender` of a [`Message::Gossip`] if we previously
/// received a [`Message::IHave`] for the same message with a much lower number of delivery
/// hops from the original broadcaster of the message.
///
/// See [Config::optimization_threshold].
fn optimize_tree(
    &mut self,
    gossip_sender: &PI,
    message: &Gossip,
    previous_ihaves: VecDeque<(PI, Round)>,
    io: &mut impl IO<PI>,
) {
    let round = message.round().expect("only called for swarm messages");
    // Pick the IHave announcement with the lowest round, i.e. the shortest
    // advertised path from the original broadcaster.
    let best_ihave = previous_ihaves
        .iter()
        .min_by(|(_a_peer, a_round), (_b_peer, b_round)| a_round.cmp(b_round))
        .copied();
    if let Some((ihave_peer, ihave_round)) = best_ihave {
        // Only optimize if the advertised path is shorter by at least the
        // configured threshold; this avoids flapping between near-equal paths.
        if (ihave_round < round) && (round - ihave_round) >= self.config.optimization_threshold
        {
            // Graft the sender of the IHave, but only if it's not already eager.
            if !self.eager_push_peers.contains(&ihave_peer) {
                // `id: None`: we only want the tree link, not a retransmission.
                let message = Message::Graft(Graft {
                    id: None,
                    round: ihave_round,
                });
                self.add_eager(ihave_peer);
                io.push(OutEvent::SendMessage(ihave_peer, message));
            }
            // Prune the sender of the Gossip.
            self.add_lazy(*gossip_sender);
            io.push(OutEvent::SendMessage(*gossip_sender, Message::Prune));
        }
    }
}
/// Handle receiving a [`Message::Prune`].
///
/// The sender asked us to stop eager-pushing full messages to it, so it is
/// demoted to the lazy set.
fn on_prune(&mut self, peer: PI) {
    self.add_lazy(peer);
}
/// Handle receiving a [`Message::IHave`].
///
/// For every announced message id we have not yet received, the announcement
/// is recorded, and — unless one is already pending for that id — a graft
/// timer is scheduled. If the message does not arrive via eager push before
/// the timer fires, it is requested explicitly with a [`Message::Graft`].
/// Per the paper (p8), the timeout should be configured from the overlay
/// diameter and the target maximum recovery latency.
fn on_ihave(&mut self, sender: PI, ihaves: Vec<IHave>, io: &mut impl IO<PI>) {
    for announcement in ihaves {
        // Skip announcements for messages we already delivered.
        if self.received_messages.contains_key(&announcement.id) {
            continue;
        }
        // Remember who can supply this message, and at which round.
        self.missing_messages
            .entry(announcement.id)
            .or_default()
            .push_back((sender, announcement.round));
        // Schedule at most one graft timer per missing message id;
        // `insert` returns true only when the id was not present yet.
        if self.graft_timer_scheduled.insert(announcement.id) {
            io.push(OutEvent::ScheduleTimer(
                self.config.graft_timeout_1,
                Timer::SendGraft(announcement.id),
            ));
        }
    }
}
/// A scheduled [`Timer::SendGraft`] has reached it's deadline.
///
/// If the announced message still has not arrived, promote the first
/// advertiser to eager, send it a Graft requesting the message, and re-arm a
/// (shorter) timer so another advertiser can be tried if needed.
fn on_send_graft_timer(&mut self, id: MessageId, io: &mut impl IO<PI>) {
    // Clear the scheduled flag so a later IHave can re-arm a timer.
    self.graft_timer_scheduled.remove(&id);
    // if the message was received before the timer ran out, there is no need to request it
    // again
    if self.received_messages.contains_key(&id) {
        return;
    }
    // get the first peer that advertised this message
    let entry = self
        .missing_messages
        .get_mut(&id)
        .and_then(|entries| entries.pop_front());
    if let Some((peer, round)) = entry {
        self.add_eager(peer);
        let message = Message::Graft(Graft {
            id: Some(id),
            round,
        });
        io.push(OutEvent::SendMessage(peer, message));
        // "when a GRAFT message is sent, another timer is started to expire after a certain timeout,
        // to ensure that the message will be requested to another neighbor if it is not received
        // meanwhile. This second timeout value should be smaller that the first, in the order of
        // an average round trip time to a neighbor." (p9)
        io.push(OutEvent::ScheduleTimer(
            self.config.graft_timeout_2,
            Timer::SendGraft(id),
        ));
    }
}
/// Handle receiving a [`Message::Graft`].
///
/// The sender wants to be an eager peer again. If the graft carries a
/// message id, reply with the cached message (if it is still cached).
fn on_graft(&mut self, sender: PI, details: Graft, io: &mut impl IO<PI>) {
    // A Graft always promotes the sender back into the eager set.
    self.add_eager(sender);
    let Some(id) = details.id else {
        // Tree-link-only graft (sent by the optimization step): nothing to resend.
        return;
    };
    match self.cache.get(&id) {
        Some(cached) => io.push(OutEvent::SendMessage(
            sender,
            Message::Gossip(cached.clone()),
        )),
        None => debug!(?id, peer=?sender, "on_graft failed to graft: message not in cache"),
    }
}
/// Handle a [`InEvent::NeighborUp`] when a peer joins the topic.
///
/// New neighbors start out in the eager set.
fn on_neighbor_up(&mut self, node: PI) {
    self.add_eager(node);
}
/// Handle a [`InEvent::NeighborDown`] when a peer leaves the topic.
/// > When a neighbor is detected to leave the overlay, it is simple removed from the
/// > membership. Furthermore, the record of IHAVE messages sent from failed members is deleted
/// > from the missing history. (p9)
fn on_neighbor_down(&mut self, node: PI) {
    // Drop every IHave record advertised by the departed peer, and forget
    // message ids that now have no remaining advertisers at all.
    self.missing_messages.retain(|_id, advertisers| {
        advertisers.retain(|(from, _round)| *from != node);
        !advertisers.is_empty()
    });
    // Remove the peer from both push sets.
    self.eager_push_peers.remove(&node);
    self.lazy_push_peers.remove(&node);
}
/// Periodic cache maintenance: drop all cache entries whose retention
/// deadline has passed, then re-arm the eviction timer.
fn on_evict_cache_timer(&mut self, now: Instant, io: &mut impl IO<PI>) {
    self.cache.expire_until(now);
    let interval = self.config.cache_evict_interval;
    io.push(OutEvent::ScheduleTimer(interval, Timer::EvictCache));
}
/// Moves peer into eager set.
///
/// The peer is removed from the lazy set first, so it is never in both.
fn add_eager(&mut self, node: PI) {
    self.lazy_push_peers.remove(&node);
    self.eager_push_peers.insert(node);
}
/// Moves peer into lazy set.
///
/// The peer is removed from the eager set first, so it is never in both.
fn add_lazy(&mut self, node: PI) {
    self.eager_push_peers.remove(&node);
    self.lazy_push_peers.insert(node);
}
/// Immediately sends message to eager peers.
///
/// The full message is forwarded to every eager peer except ourselves and
/// the peer we received it from.
fn eager_push(&mut self, gossip: Gossip, sender: &PI, io: &mut impl IO<PI>) {
    for peer in &self.eager_push_peers {
        if *peer == self.me || peer == sender {
            continue;
        }
        io.push(OutEvent::SendMessage(
            *peer,
            Message::Gossip(gossip.clone()),
        ));
    }
}
/// Queue lazy message announcements into the queue that will be sent out as batched
/// [`Message::IHave`] messages once the [`Timer::DispatchLazyPush`] timer is triggered.
fn lazy_push(&mut self, gossip: Gossip, sender: &PI, io: &mut impl IO<PI>) {
    // Only swarm-scoped messages carry a round and are announced lazily.
    let Some(round) = gossip.round() else {
        return;
    };
    // Queue an IHave announcement for every lazy peer except the sender.
    for peer in self.lazy_push_peers.iter().filter(|p| *p != sender) {
        let announcement = IHave {
            id: gossip.id,
            round,
        };
        self.lazy_push_queue
            .entry(*peer)
            .or_default()
            .push(announcement);
    }
    // Arm the dispatch timer at most once; it drains the queue when it fires.
    if !self.dispatch_timer_scheduled {
        self.dispatch_timer_scheduled = true;
        io.push(OutEvent::ScheduleTimer(
            self.config.dispatch_timeout,
            Timer::DispatchLazyPush,
        ));
    }
}
}
#[cfg(test)]
mod test {
    use super::*;
    /// Exercises the tree-optimization step: a Gossip arriving over a much
    /// longer path than a previously seen IHave triggers Graft + Prune.
    #[test]
    fn optimize_tree() {
        let mut io = VecDeque::new();
        let config: Config = Default::default();
        let mut state = State::new(1, config.clone(), 1024);
        let now = Instant::now();
        // we receive an IHave message from peer 2
        // it has `round: 2` which means that the peer that sent us the IHave was
        // two hops away from the original sender of the message
        let content: Bytes = b"hi".to_vec().into();
        let id = MessageId::from_content(&content);
        let event = InEvent::RecvMessage(
            2u32,
            Message::IHave(vec![IHave {
                id,
                round: Round(2),
            }]),
        );
        state.handle(event, now, &mut io);
        io.clear();
        // we then receive a `Gossip` message with the same `MessageId` from peer 3
        // the message has `round: 6`, which means it travelled 6 hops until it reached us
        // that is 4 hops more than the path advertised by peer 2's IHave — not enough
        // to trigger the optimization, because we use the default config which has
        // `optimization_threshold: 7`
        let event = InEvent::RecvMessage(
            3,
            Message::Gossip(Gossip {
                id,
                content: content.clone(),
                scope: DeliveryScope::Swarm(Round(6)),
            }),
        );
        state.handle(event, now, &mut io);
        let expected = {
            // we expect a dispatch timer schedule and receive event, but no Graft or Prune
            // messages
            let mut io = VecDeque::new();
            io.push(OutEvent::ScheduleTimer(
                config.dispatch_timeout,
                Timer::DispatchLazyPush,
            ));
            io.push(OutEvent::EmitEvent(Event::Received(GossipEvent {
                content,
                delivered_from: 3,
                scope: DeliveryScope::Swarm(Round(6)),
            })));
            io
        };
        assert_eq!(io, expected);
        io.clear();
        // now we run the same flow again but this time peer 3 is 9 hops away from the
        // message's sender. this will trigger the optimization:
        // peer 2 will be promoted to eager and peer 3 demoted to lazy
        let content: Bytes = b"hi2".to_vec().into();
        let id = MessageId::from_content(&content);
        let event = InEvent::RecvMessage(
            2u32,
            Message::IHave(vec![IHave {
                id,
                round: Round(2),
            }]),
        );
        state.handle(event, now, &mut io);
        io.clear();
        let event = InEvent::RecvMessage(
            3,
            Message::Gossip(Gossip {
                id,
                content: content.clone(),
                scope: DeliveryScope::Swarm(Round(9)),
            }),
        );
        state.handle(event, now, &mut io);
        let expected = {
            // this time we expect the Graft and Prune messages to be sent, performing the
            // optimization step
            let mut io = VecDeque::new();
            io.push(OutEvent::SendMessage(
                2,
                Message::Graft(Graft {
                    id: None,
                    round: Round(2),
                }),
            ));
            io.push(OutEvent::SendMessage(3, Message::Prune));
            io.push(OutEvent::EmitEvent(Event::Received(GossipEvent {
                content,
                delivered_from: 3,
                scope: DeliveryScope::Swarm(Round(9)),
            })));
            io
        };
        assert_eq!(io, expected);
    }
    /// A Gossip whose id is not the hash of its content must be dropped
    /// without emitting any events.
    #[test]
    fn spoofed_messages_are_ignored() {
        let config: Config = Default::default();
        let mut state = State::new(1, config.clone(), 1024);
        let now = Instant::now();
        // we recv a correct gossip message and expect the Received event to be emitted
        let content: Bytes = b"hello1".to_vec().into();
        let message = Message::Gossip(Gossip {
            content: content.clone(),
            id: MessageId::from_content(&content),
            scope: DeliveryScope::Swarm(Round(1)),
        });
        let mut io = VecDeque::new();
        state.handle(InEvent::RecvMessage(2, message), now, &mut io);
        let expected = {
            let mut io = VecDeque::new();
            io.push(OutEvent::ScheduleTimer(
                config.cache_evict_interval,
                Timer::EvictCache,
            ));
            io.push(OutEvent::ScheduleTimer(
                config.dispatch_timeout,
                Timer::DispatchLazyPush,
            ));
            io.push(OutEvent::EmitEvent(Event::Received(GossipEvent {
                content,
                delivered_from: 2,
                scope: DeliveryScope::Swarm(Round(1)),
            })));
            io
        };
        assert_eq!(io, expected);
        // now we recv with a spoofed id and expect no event to be emitted
        let content: Bytes = b"hello2".to_vec().into();
        let message = Message::Gossip(Gossip {
            content,
            id: MessageId::from_content(b"foo"),
            scope: DeliveryScope::Swarm(Round(1)),
        });
        let mut io = VecDeque::new();
        state.handle(InEvent::RecvMessage(2, message), now, &mut io);
        let expected = VecDeque::new();
        assert_eq!(io, expected);
    }
    /// Cache entries are dropped once their retention deadline passes and the
    /// EvictCache timer fires.
    #[test]
    fn cache_is_evicted() {
        let config: Config = Default::default();
        let mut state = State::new(1, config.clone(), 1024);
        let now = Instant::now();
        let content: Bytes = b"hello1".to_vec().into();
        let message = Message::Gossip(Gossip {
            content: content.clone(),
            id: MessageId::from_content(&content),
            scope: DeliveryScope::Swarm(Round(1)),
        });
        let mut io = VecDeque::new();
        state.handle(InEvent::RecvMessage(2, message), now, &mut io);
        assert_eq!(state.cache.len(), 1);
        // One second later the retention window has not elapsed yet.
        let now = now + Duration::from_secs(1);
        state.handle(InEvent::TimerExpired(Timer::EvictCache), now, &mut io);
        assert_eq!(state.cache.len(), 1);
        // After the full retention interval the entry is evicted.
        let now = now + config.message_cache_retention;
        state.handle(InEvent::TimerExpired(Timer::EvictCache), now, &mut io);
        assert_eq!(state.cache.len(), 0);
    }
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,381 @@
//! The protocol state of the `iroh-gossip` protocol.
use std::collections::{hash_map, HashMap, HashSet};
use n0_future::time::{Duration, Instant};
use rand::Rng;
use serde::{Deserialize, Serialize};
use tracing::trace;
use crate::{
metrics::Metrics,
proto::{
topic::{self, Command},
util::idbytes_impls,
Config, PeerData, PeerIdentity, MIN_MAX_MESSAGE_SIZE,
},
};
/// The identifier for a topic
///
/// An opaque 32-byte value; equality, ordering and hashing are byte-wise.
#[derive(Clone, Copy, Eq, PartialEq, Hash, Serialize, Ord, PartialOrd, Deserialize)]
pub struct TopicId([u8; 32]);
// Generates `from_bytes`/`as_bytes`, hex `Display`/`Debug`/`FromStr` and
// conversion impls for the 32-byte newtype (see `proto::util`).
idbytes_impls!(TopicId, "TopicId");
impl TopicId {
/// Convert to a hex string limited to the first 5 bytes for a friendly string
/// representation of the key.
pub fn fmt_short(&self) -> String {
data_encoding::HEXLOWER.encode(&self.as_bytes()[..5])
}
}
/// Protocol wire message
///
/// This is the wire frame of the `iroh-gossip` protocol.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Message<PI> {
    /// The topic this message belongs to; used to route to the matching topic state.
    pub(crate) topic: TopicId,
    /// The topic-level payload (swarm membership or gossip broadcast message).
    pub(crate) message: topic::Message<PI>,
}
impl<PI> Message<PI> {
    /// Get the kind of this message
    ///
    /// Delegates to the inner topic-level message's classification.
    pub fn kind(&self) -> MessageKind {
        self.message.kind()
    }
}
impl<PI: Serialize> Message<PI> {
    /// Computes the postcard-encoded size of the per-message envelope
    /// overhead (topic id plus enum discriminators), i.e. the bytes a wire
    /// frame occupies beyond its payload.
    pub(crate) fn postcard_header_size() -> usize {
        // We create a message that has no payload (gossip::Message::Prune), calculate the encoded size,
        // and subtract 1 for the discriminator of the inner gossip::Message enum.
        let m = Self {
            topic: TopicId(Default::default()),
            message: topic::Message::<PI>::Gossip(super::plumtree::Message::Prune),
        };
        postcard::experimental::serialized_size(&m).unwrap() - 1
    }
}
/// Whether this is a control or data message
///
/// Used for metrics bucketing: data messages carry gossip payloads, control
/// messages carry protocol-internal traffic.
// Derive the cheap standard traits eagerly: the enum is fieldless, so
// `Copy`/`Clone` are free, and `PartialEq`/`Eq` let callers compare kinds
// directly instead of matching.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MessageKind {
    /// A data message.
    Data,
    /// A control message.
    Control,
}
impl<PI: Serialize> Message<PI> {
    /// Get the encoded size of this message
    ///
    /// This is the number of bytes the postcard encoding of the full wire
    /// frame occupies.
    pub fn size(&self) -> postcard::Result<usize> {
        postcard::experimental::serialized_size(&self)
    }
}
/// A timer to be registered into the runtime
///
/// As the implementation of the protocol is an IO-less state machine, registering timers does not
/// happen within the protocol implementation. Instead, these `Timer` structs are emitted as
/// [`OutEvent`]s. The implementer must register the timer in its runtime to be emitted on the specified [`Instant`],
/// and once triggered inject an [`InEvent::TimerExpired`] into the protocol state.
#[derive(Clone, Debug)]
pub struct Timer<PI> {
    /// The topic whose state scheduled this timer.
    topic: TopicId,
    /// The opaque topic-level timer to hand back on expiry.
    timer: topic::Timer<PI>,
}
/// Input event to the protocol state.
#[derive(Clone, Debug)]
pub enum InEvent<PI> {
    /// Message received from the network.
    RecvMessage(PI, Message<PI>),
    /// Execute a command from the application.
    Command(TopicId, Command<PI>),
    /// Trigger a previously scheduled timer.
    TimerExpired(Timer<PI>),
    /// Peer disconnected on the network level.
    PeerDisconnected(PI),
    /// Update the opaque peer data about yourself.
    UpdatePeerData(PeerData),
}
/// Output event from the protocol state.
#[derive(Debug, Clone)]
pub enum OutEvent<PI> {
    /// Send a message on the network
    SendMessage(PI, Message<PI>),
    /// Emit an event to the application.
    EmitEvent(TopicId, topic::Event<PI>),
    /// Schedule a timer. The runtime is responsible for sending an [InEvent::TimerExpired]
    /// after the duration.
    ScheduleTimer(Duration, Timer<PI>),
    /// Close the connection to a peer on the network level.
    DisconnectPeer(PI),
    /// Updated peer data was received for a peer.
    PeerData(PI, PeerData),
}
// Per connected peer: the set of topics currently using that connection.
// A network-level disconnect is only emitted once this set becomes empty.
type ConnsMap<PI> = HashMap<PI, HashSet<TopicId>>;
// Buffer of outgoing events accumulated while handling one input event.
type Outbox<PI> = Vec<OutEvent<PI>>;
// An [`InEvent`] mapped to its routing destination inside [`State::handle`].
enum InEventMapped<PI> {
    // Fan out to the state of every joined topic.
    All(topic::InEvent<PI>),
    // Route only to the state of the given topic.
    TopicEvent(TopicId, topic::InEvent<PI>),
}
impl<PI> From<InEvent<PI>> for InEventMapped<PI> {
fn from(event: InEvent<PI>) -> InEventMapped<PI> {
match event {
InEvent::RecvMessage(from, Message { topic, message }) => {
Self::TopicEvent(topic, topic::InEvent::RecvMessage(from, message))
}
InEvent::Command(topic, command) => {
Self::TopicEvent(topic, topic::InEvent::Command(command))
}
InEvent::TimerExpired(Timer { topic, timer }) => {
Self::TopicEvent(topic, topic::InEvent::TimerExpired(timer))
}
InEvent::PeerDisconnected(peer) => Self::All(topic::InEvent::PeerDisconnected(peer)),
InEvent::UpdatePeerData(data) => Self::All(topic::InEvent::UpdatePeerData(data)),
}
}
}
/// The state of the `iroh-gossip` protocol.
///
/// The implementation works as an IO-less state machine. The implementer injects events through
/// [`Self::handle`], which returns an iterator of [`OutEvent`]s to be processed.
///
/// This struct contains a map of [`topic::State`] for each topic that was joined. It mostly acts as
/// a forwarder of [`InEvent`]s to matching topic state. Each topic's state is completely
/// independent; thus the actual protocol logic lives with [`topic::State`].
#[derive(Debug)]
pub struct State<PI, R> {
    // Our own peer identity.
    me: PI,
    // Opaque data about ourselves, shared with new topic states and updatable at runtime.
    me_data: PeerData,
    // Protocol configuration, cloned into every newly joined topic state.
    config: Config,
    // RNG, cloned into every newly joined topic state.
    rng: R,
    // Per-topic protocol state, keyed by topic id.
    states: HashMap<TopicId, topic::State<PI, R>>,
    // Outgoing events accumulated while handling one input event.
    outbox: Outbox<PI>,
    // Which topics use which peer connection (see `ConnsMap`).
    peer_topics: ConnsMap<PI>,
}
impl<PI: PeerIdentity, R: Rng + Clone> State<PI, R> {
    /// Create a new protocol state instance.
    ///
    /// `me` is the [`PeerIdentity`] of the local node, `peer_data` is the initial [`PeerData`]
    /// (which can be updated over time).
    /// For the protocol to perform as recommended in the papers, the [`Config`] should be
    /// identical for all nodes in the network.
    ///
    /// ## Panics
    ///
    /// Panics if [`Config::max_message_size`] is below [`MIN_MAX_MESSAGE_SIZE`].
    pub fn new(me: PI, me_data: PeerData, config: Config, rng: R) -> Self {
        assert!(
            config.max_message_size >= MIN_MAX_MESSAGE_SIZE,
            "max_message_size must be at least {MIN_MAX_MESSAGE_SIZE}"
        );
        Self {
            me,
            me_data,
            config,
            rng,
            states: Default::default(),
            outbox: Default::default(),
            peer_topics: Default::default(),
        }
    }
    /// Get a reference to the node's [`PeerIdentity`]
    pub fn me(&self) -> &PI {
        &self.me
    }
    /// Get a reference to the protocol state for a topic.
    ///
    /// Returns `None` if the topic was never joined (or was quit).
    pub fn state(&self, topic: &TopicId) -> Option<&topic::State<PI, R>> {
        self.states.get(topic)
    }
    /// Resets the tracked stats for a topic.
    ///
    /// Does nothing if the topic is unknown.
    pub fn reset_stats(&mut self, topic: &TopicId) {
        if let Some(state) = self.states.get_mut(topic) {
            state.reset_stats();
        }
    }
    /// Get an iterator of all joined topics.
    pub fn topics(&self) -> impl Iterator<Item = &TopicId> {
        self.states.keys()
    }
    /// Get an iterator for the states of all joined topics.
    pub fn states(&self) -> impl Iterator<Item = (&TopicId, &topic::State<PI, R>)> {
        self.states.iter()
    }
    /// Check if a topic has any active (connected) peers.
    ///
    /// Returns `false` for unknown topics.
    pub fn has_active_peers(&self, topic: &TopicId) -> bool {
        self.state(topic)
            .map(|s| s.has_active_peers())
            .unwrap_or(false)
    }
    /// Returns the maximum message size configured in the gossip protocol.
    pub fn max_message_size(&self) -> usize {
        self.config.max_message_size
    }
    /// Handle an [`InEvent`]
    ///
    /// This returns an iterator of [`OutEvent`]s that must be processed.
    pub fn handle(
        &mut self,
        event: InEvent<PI>,
        now: Instant,
        metrics: Option<&Metrics>,
    ) -> impl Iterator<Item = OutEvent<PI>> + '_ + use<'_, PI, R> {
        trace!("in : {event:?}");
        if let Some(metrics) = &metrics {
            track_in_event(&event, metrics);
        }
        // Map the event to its routing destination (single topic vs. all topics).
        let event: InEventMapped<PI> = event.into();
        match event {
            InEventMapped::TopicEvent(topic, event) => {
                // when receiving a join command, initialize state if it doesn't exist
                if matches!(&event, topic::InEvent::Command(Command::Join(_peers))) {
                    if let hash_map::Entry::Vacant(e) = self.states.entry(topic) {
                        e.insert(topic::State::with_rng(
                            self.me,
                            Some(self.me_data.clone()),
                            self.config.clone(),
                            self.rng.clone(),
                        ));
                    }
                }
                // when receiving a quit command, note this and drop the topic state after
                // processing this last event
                let quit = matches!(event, topic::InEvent::Command(Command::Quit));
                // pass the event to the state handler
                if let Some(state) = self.states.get_mut(&topic) {
                    // when receiving messages, update our conn map to take note that this topic state may want
                    // to keep this connection
                    if let topic::InEvent::RecvMessage(from, _message) = &event {
                        self.peer_topics.entry(*from).or_default().insert(topic);
                    }
                    let out = state.handle(event, now);
                    for event in out {
                        handle_out_event(topic, event, &mut self.peer_topics, &mut self.outbox);
                    }
                }
                if quit {
                    self.states.remove(&topic);
                }
            }
            // when a peer disconnected on the network level, forward event to all states
            InEventMapped::All(event) => {
                // Keep our own peer data up to date so newly joined topics get the latest.
                if let topic::InEvent::UpdatePeerData(data) = &event {
                    self.me_data = data.clone();
                }
                for (topic, state) in self.states.iter_mut() {
                    let out = state.handle(event.clone(), now);
                    for event in out {
                        handle_out_event(*topic, event, &mut self.peer_topics, &mut self.outbox);
                    }
                }
            }
        }
        // track metrics
        if let Some(metrics) = &metrics {
            track_out_events(&self.outbox, metrics);
        }
        // Hand out the accumulated events, leaving the outbox empty for the next call.
        self.outbox.drain(..)
    }
}
/// Forwards a single topic-level [`topic::OutEvent`] into the global outbox,
/// wrapping it with its `topic` id and maintaining the per-peer connection
/// reference tracking in `conns`.
fn handle_out_event<PI: PeerIdentity>(
    topic: TopicId,
    event: topic::OutEvent<PI>,
    conns: &mut ConnsMap<PI>,
    outbox: &mut Outbox<PI>,
) {
    trace!("out: {event:?}");
    match event {
        topic::OutEvent::SendMessage(to, message) => {
            outbox.push(OutEvent::SendMessage(to, Message { topic, message }))
        }
        topic::OutEvent::EmitEvent(event) => outbox.push(OutEvent::EmitEvent(topic, event)),
        topic::OutEvent::ScheduleTimer(delay, timer) => {
            outbox.push(OutEvent::ScheduleTimer(delay, Timer { topic, timer }))
        }
        topic::OutEvent::DisconnectPeer(peer) => {
            // This topic no longer needs the connection. Remove the topic from
            // the peer's set and only disconnect on the network level once
            // *no* topic uses the connection anymore.
            //
            // BUGFIX: the previous `list.remove(&topic) || list.is_empty()`
            // short-circuited: whenever the removal succeeded, the peer was
            // disconnected and its whole record dropped even though other
            // topics were still using the connection.
            let empty = conns
                .get_mut(&peer)
                .map(|topics| {
                    topics.remove(&topic);
                    topics.is_empty()
                })
                .unwrap_or(false);
            if empty {
                conns.remove(&peer);
                outbox.push(OutEvent::DisconnectPeer(peer));
            }
        }
        topic::OutEvent::PeerData(peer, data) => outbox.push(OutEvent::PeerData(peer, data)),
    }
}
/// Updates the send/emit metrics counters for every outgoing event.
fn track_out_events<PI: Serialize>(events: &[OutEvent<PI>], metrics: &Metrics) {
    for event in events {
        match event {
            OutEvent::SendMessage(_to, message) => {
                // Encoded size; falls back to 0 if serialization sizing fails.
                let size = message.size().unwrap_or(0) as u64;
                match message.kind() {
                    MessageKind::Data => {
                        metrics.msgs_data_sent.inc();
                        metrics.msgs_data_sent_size.inc_by(size);
                    }
                    MessageKind::Control => {
                        metrics.msgs_ctrl_sent.inc();
                        metrics.msgs_ctrl_sent_size.inc_by(size);
                    }
                }
            }
            OutEvent::EmitEvent(_topic, super::Event::NeighborUp(_peer)) => {
                metrics.neighbor_up.inc();
            }
            OutEvent::EmitEvent(_topic, super::Event::NeighborDown(_peer)) => {
                metrics.neighbor_down.inc();
            }
            _ => {}
        }
    }
}
/// Updates the receive metrics counters for an incoming event.
///
/// Only network messages are tracked; all other inputs are local.
fn track_in_event<PI: Serialize>(event: &InEvent<PI>, metrics: &Metrics) {
    let InEvent::RecvMessage(_from, message) = event else {
        return;
    };
    // Encoded size; falls back to 0 if serialization sizing fails.
    let size = message.size().unwrap_or(0) as u64;
    match message.kind() {
        MessageKind::Data => {
            metrics.msgs_data_recv.inc();
            metrics.msgs_data_recv_size.inc_by(size);
        }
        MessageKind::Control => {
            metrics.msgs_ctrl_recv.inc();
            metrics.msgs_ctrl_recv_size.inc_by(size);
        }
    }
}

View file

@ -0,0 +1,363 @@
//! This module contains the implementation of the gossiping protocol for an individual topic
use std::collections::VecDeque;
use bytes::Bytes;
use derive_more::From;
use n0_future::time::{Duration, Instant};
use rand::Rng;
use serde::{Deserialize, Serialize};
use super::{
hyparview::{self, InEvent as SwarmIn},
plumtree::{self, GossipEvent, InEvent as GossipIn, Scope},
state::MessageKind,
PeerData, PeerIdentity, DEFAULT_MAX_MESSAGE_SIZE,
};
use crate::proto::MIN_MAX_MESSAGE_SIZE;
/// Input event to the topic state handler.
#[derive(Clone, Debug)]
pub enum InEvent<PI> {
    /// Message received from the network.
    RecvMessage(PI, Message<PI>),
    /// Execute a command from the application.
    Command(Command<PI>),
    /// Trigger a previously scheduled timer.
    TimerExpired(Timer<PI>),
    /// Peer disconnected on the network level.
    PeerDisconnected(PI),
    /// Update the opaque peer data about yourself.
    UpdatePeerData(PeerData),
}
/// An output event from the state handler.
#[derive(Debug, PartialEq, Eq)]
pub enum OutEvent<PI> {
    /// Send a message on the network
    SendMessage(PI, Message<PI>),
    /// Emit an event to the application.
    EmitEvent(Event<PI>),
    /// Schedule a timer. The runtime is responsible for sending an [InEvent::TimerExpired]
    /// after the duration.
    ScheduleTimer(Duration, Timer<PI>),
    /// Close the connection to a peer on the network level.
    DisconnectPeer(PI),
    /// Emitted when new [`PeerData`] was received for a peer.
    PeerData(PI, PeerData),
}
impl<PI> From<hyparview::OutEvent<PI>> for OutEvent<PI> {
fn from(event: hyparview::OutEvent<PI>) -> Self {
use hyparview::OutEvent::*;
match event {
SendMessage(to, message) => Self::SendMessage(to, message.into()),
ScheduleTimer(delay, timer) => Self::ScheduleTimer(delay, timer.into()),
DisconnectPeer(peer) => Self::DisconnectPeer(peer),
EmitEvent(event) => Self::EmitEvent(event.into()),
PeerData(peer, data) => Self::PeerData(peer, data),
}
}
}
impl<PI> From<plumtree::OutEvent<PI>> for OutEvent<PI> {
fn from(event: plumtree::OutEvent<PI>) -> Self {
use plumtree::OutEvent::*;
match event {
SendMessage(to, message) => Self::SendMessage(to, message.into()),
ScheduleTimer(delay, timer) => Self::ScheduleTimer(delay, timer.into()),
EmitEvent(event) => Self::EmitEvent(event.into()),
}
}
}
/// A trait for a concrete type to push `OutEvent`s to.
///
/// The implementation is generic over this trait, which allows the upper layer to supply a
/// container of their choice for `OutEvent`s emitted from the protocol state.
pub trait IO<PI: Clone> {
    /// Push an event in the IO container
    fn push(&mut self, event: impl Into<OutEvent<PI>>);
    /// Push all events from an iterator into the IO container
    ///
    /// Default implementation: pushes one by one in iterator order.
    fn push_from_iter(&mut self, iter: impl IntoIterator<Item = impl Into<OutEvent<PI>>>) {
        for event in iter.into_iter() {
            self.push(event);
        }
    }
}
/// A protocol message for a particular topic
///
/// Wraps either a membership-layer (hyparview) or broadcast-layer (plumtree)
/// message; the `From` derive allows lifting either layer's message directly.
#[derive(From, Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum Message<PI> {
    /// A message of the swarm membership layer
    Swarm(hyparview::Message<PI>),
    /// A message of the gossip broadcast layer
    Gossip(plumtree::Message),
}
impl<PI> Message<PI> {
    /// Classify this message: only a full gossip payload counts as data,
    /// everything else (membership and gossip control frames) is control.
    pub fn kind(&self) -> MessageKind {
        match self {
            Message::Gossip(plumtree::Message::Gossip(_)) => MessageKind::Data,
            Message::Gossip(_) | Message::Swarm(_) => MessageKind::Control,
        }
    }
    /// Returns `true` if this is a disconnect message (which is the last message sent to a peer per topic).
    pub fn is_disconnect(&self) -> bool {
        matches!(self, Message::Swarm(hyparview::Message::Disconnect(_)))
    }
}
/// An event to be emitted to the application for a particular topic.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Serialize, Deserialize)]
pub enum Event<PI> {
    /// We have a new, direct neighbor in the swarm membership layer for this topic
    NeighborUp(PI),
    /// We dropped direct neighbor in the swarm membership layer for this topic
    NeighborDown(PI),
    /// A gossip message was received for this topic
    Received(GossipEvent<PI>),
}
impl<PI> From<hyparview::Event<PI>> for Event<PI> {
fn from(value: hyparview::Event<PI>) -> Self {
match value {
hyparview::Event::NeighborUp(peer) => Self::NeighborUp(peer),
hyparview::Event::NeighborDown(peer) => Self::NeighborDown(peer),
}
}
}
impl<PI> From<plumtree::Event<PI>> for Event<PI> {
    /// Maps a broadcast-layer event onto the topic event type.
    fn from(value: plumtree::Event<PI>) -> Self {
        // `plumtree::Event` has a single variant (the original one-arm match
        // was exhaustive), so this pattern is irrefutable.
        let plumtree::Event::Received(gossip_event) = value;
        Self::Received(gossip_event)
    }
}
/// A timer to be registered for a particular topic.
///
/// This should be treated as an opaque value by the implementer and, once emitted, simply returned
/// to the protocol through [`InEvent::TimerExpired`].
#[derive(Clone, From, Debug, PartialEq, Eq)]
pub enum Timer<PI> {
    /// A timer for the swarm layer
    Swarm(hyparview::Timer<PI>),
    /// A timer for the gossip layer
    Gossip(plumtree::Timer),
}
/// A command to the protocol state for a particular topic.
#[derive(Clone, derive_more::Debug)]
pub enum Command<PI> {
    /// Join this topic and connect to peers.
    ///
    /// If the list of peers is empty, will prepare the state and accept incoming join requests,
    /// but only become operational after the first join request by another peer.
    Join(Vec<PI>),
    /// Broadcast a message for this topic.
    // The custom debug format prints only the payload length, not its bytes.
    Broadcast(#[debug("<{}b>", _0.len())] Bytes, Scope),
    /// Leave this topic and drop all state.
    Quit,
}
impl<PI: Clone> IO<PI> for VecDeque<OutEvent<PI>> {
    /// Appends the converted event to the back of the queue.
    fn push(&mut self, event: impl Into<OutEvent<PI>>) {
        let event = event.into();
        self.push_back(event);
    }
}
/// Protocol configuration
// `serde(default)`: missing fields deserialize to their `Default` values.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
    /// Configuration for the swarm membership layer
    pub membership: hyparview::Config,
    /// Configuration for the gossip broadcast layer
    pub broadcast: plumtree::Config,
    /// Max message size in bytes.
    ///
    /// This size should be the same across a network to ensure all nodes can transmit and read large messages.
    ///
    /// At minimum, this size should be large enough to send gossip control messages. This can vary, depending on the size of the [`PeerIdentity`] you use and the size of the [`PeerData`] you transmit in your messages.
    ///
    /// The default is [`DEFAULT_MAX_MESSAGE_SIZE`].
    pub max_message_size: usize,
}
impl Default for Config {
fn default() -> Self {
Self {
membership: Default::default(),
broadcast: Default::default(),
max_message_size: DEFAULT_MAX_MESSAGE_SIZE,
}
}
}
/// The topic state maintains the swarm membership and broadcast tree for a particular topic.
#[derive(Debug)]
pub struct State<PI, R> {
    // Our own peer identity.
    me: PI,
    // Swarm membership layer (hyparview).
    pub(crate) swarm: hyparview::State<PI, R>,
    // Gossip broadcast layer (plumtree).
    pub(crate) gossip: plumtree::State<PI>,
    // Outgoing events accumulated while handling one input event.
    outbox: VecDeque<OutEvent<PI>>,
    // Counters for sent/received messages.
    stats: Stats,
}
impl<PI: PeerIdentity> State<PI, rand::rngs::ThreadRng> {
    /// Initialize the local state with the default random number generator.
    ///
    /// ## Panics
    ///
    /// Panics if [`Config::max_message_size`] is below [`MIN_MAX_MESSAGE_SIZE`].
    pub fn new(me: PI, me_data: Option<PeerData>, config: Config) -> Self {
        // Delegates to `with_rng` using the thread-local RNG.
        Self::with_rng(me, me_data, config, rand::rng())
    }
}
impl<PI, R> State<PI, R> {
    /// The address of your local endpoint.
    ///
    /// Returns a reference to the identity this state was created with.
    pub fn endpoint(&self) -> &PI {
        &self.me
    }
}
impl<PI: PeerIdentity, R: Rng> State<PI, R> {
/// Initialize the local state with a custom random number generator.
///
/// ## Panics
///
/// Panics if [`Config::max_message_size`] is below [`MIN_MAX_MESSAGE_SIZE`].
pub fn with_rng(me: PI, me_data: Option<PeerData>, config: Config, rng: R) -> Self {
assert!(
config.max_message_size >= MIN_MAX_MESSAGE_SIZE,
"max_message_size must be at least {MIN_MAX_MESSAGE_SIZE}"
);
let max_payload_size =
config.max_message_size - super::Message::<PI>::postcard_header_size();
Self {
swarm: hyparview::State::new(me, me_data, config.membership, rng),
gossip: plumtree::State::new(me, config.broadcast, max_payload_size),
me,
outbox: VecDeque::new(),
stats: Stats::default(),
}
}
/// Handle an incoming event.
///
/// Returns an iterator of outgoing events that must be processed by the application.
pub fn handle(
&mut self,
event: InEvent<PI>,
now: Instant,
) -> impl Iterator<Item = OutEvent<PI>> + '_ {
let io = &mut self.outbox;
// Process the event, store out events in outbox.
match event {
InEvent::Command(command) => match command {
Command::Join(peers) => {
for peer in peers {
self.swarm.handle(SwarmIn::RequestJoin(peer), io);
}
}
Command::Broadcast(data, scope) => {
self.gossip
.handle(GossipIn::Broadcast(data, scope), now, io)
}
Command::Quit => self.swarm.handle(SwarmIn::Quit, io),
},
InEvent::RecvMessage(from, message) => {
self.stats.messages_received += 1;
match message {
Message::Swarm(message) => {
self.swarm.handle(SwarmIn::RecvMessage(from, message), io)
}
Message::Gossip(message) => {
self.gossip
.handle(GossipIn::RecvMessage(from, message), now, io)
}
}
}
InEvent::TimerExpired(timer) => match timer {
Timer::Swarm(timer) => self.swarm.handle(SwarmIn::TimerExpired(timer), io),
Timer::Gossip(timer) => self.gossip.handle(GossipIn::TimerExpired(timer), now, io),
},
InEvent::PeerDisconnected(peer) => {
self.swarm.handle(SwarmIn::PeerDisconnected(peer), io);
self.gossip.handle(GossipIn::NeighborDown(peer), now, io);
}
InEvent::UpdatePeerData(data) => self.swarm.handle(SwarmIn::UpdatePeerData(data), io),
}
// Forward NeighborUp and NeighborDown events from hyparview to plumtree
let mut io = VecDeque::new();
for event in self.outbox.iter() {
match event {
OutEvent::EmitEvent(Event::NeighborUp(peer)) => {
self.gossip
.handle(GossipIn::NeighborUp(*peer), now, &mut io)
}
OutEvent::EmitEvent(Event::NeighborDown(peer)) => {
self.gossip
.handle(GossipIn::NeighborDown(*peer), now, &mut io)
}
_ => {}
}
}
// Note that this is a no-op because plumtree::handle(NeighborUp | NeighborDown)
// above does not emit any OutEvents.
self.outbox.extend(io.drain(..));
// Update sent message counter
self.stats.messages_sent += self
.outbox
.iter()
.filter(|event| matches!(event, OutEvent::SendMessage(_, _)))
.count();
self.outbox.drain(..)
}
/// Get stats on how many messages were sent and received.
// TODO: Remove/replace with metrics?
pub fn stats(&self) -> &Stats {
&self.stats
}
/// Reset all statistics.
pub fn reset_stats(&mut self) {
self.gossip.stats = Default::default();
self.swarm.stats = Default::default();
self.stats = Default::default();
}
    /// Get statistics for the gossip broadcast state
    ///
    /// These are the plumtree (broadcast layer) counters, separate from the
    /// per-topic [`Stats`] returned by [`Self::stats`].
    ///
    /// TODO: Remove/replace with metrics?
    pub fn gossip_stats(&self) -> &plumtree::Stats {
        self.gossip.stats()
    }
    /// Check if this topic has any active (connected) peers.
    ///
    /// "Active" means present in the swarm's active view, i.e. peers we
    /// currently exchange protocol messages with.
    pub fn has_active_peers(&self) -> bool {
        !self.swarm.active_view.is_empty()
    }
}
/// Statistics for the protocol state of a topic
///
/// Counters are cumulative since topic creation or the last reset.
#[derive(Clone, Debug, Default)]
pub struct Stats {
    /// Number of messages sent
    pub messages_sent: usize,
    /// Number of messages received
    pub messages_received: usize,
}

View file

@ -0,0 +1,532 @@
//! Utilities used in the protocol implementation
use std::{
collections::{hash_map, BinaryHeap, HashMap},
hash::Hash,
};
use n0_future::time::Instant;
use rand::{
seq::{IteratorRandom, SliceRandom},
Rng,
};
/// Implement methods, display, debug and conversion traits for 32 byte identifiers.
///
/// For a newtype `$ty` wrapping `[u8; 32]`, this generates:
/// - `from_bytes` / `as_bytes` accessors
/// - `From<T>` for any `T: Into<[u8; 32]>`
/// - `Display` as lowercase hex, `Debug` as `$name(<hex>)`
/// - `FromStr` parsing from hex (expects exactly 64 hex characters)
/// - `AsRef<[u8]>` and `AsRef<[u8; 32]>`
macro_rules! idbytes_impls {
    ($ty:ty, $name:expr) => {
        impl $ty {
            /// Create from a byte array.
            pub const fn from_bytes(bytes: [u8; 32]) -> Self {
                Self(bytes)
            }
            /// Get as byte slice.
            pub fn as_bytes(&self) -> &[u8; 32] {
                &self.0
            }
        }
        impl<T: ::std::convert::Into<[u8; 32]>> ::std::convert::From<T> for $ty {
            fn from(value: T) -> Self {
                Self::from_bytes(value.into())
            }
        }
        // Display: bare lowercase hex of the 32 bytes.
        impl ::std::fmt::Display for $ty {
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                write!(f, "{}", ::hex::encode(&self.0))
            }
        }
        // Debug: same hex, wrapped in the type's display name.
        impl ::std::fmt::Debug for $ty {
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                write!(f, "{}({})", $name, ::hex::encode(&self.0))
            }
        }
        // Inverse of `Display`: errors if the input is not exactly 64 hex chars.
        impl ::std::str::FromStr for $ty {
            type Err = ::hex::FromHexError;
            fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
                let mut bytes = [0u8; 32];
                ::hex::decode_to_slice(s, &mut bytes)?;
                Ok(Self::from_bytes(bytes))
            }
        }
        impl ::std::convert::AsRef<[u8]> for $ty {
            fn as_ref(&self) -> &[u8] {
                &self.0
            }
        }
        impl ::std::convert::AsRef<[u8; 32]> for $ty {
            fn as_ref(&self) -> &[u8; 32] {
                &self.0
            }
        }
    };
}
// Re-export so sibling modules in the crate can import the macro by path.
pub(crate) use idbytes_impls;
/// A hash set where the iteration order of the values is independent of their
/// hash values.
///
/// This is a wrapper around [`indexmap::IndexSet`] with a couple of utility
/// methods to randomly select elements from the set.
#[derive(Default, Debug, Clone, derive_more::Deref)]
pub(crate) struct IndexSet<T> {
    // The `Deref` derive exposes the read-only `indexmap::IndexSet` API
    // (`iter`, `len`, `is_empty`, ...) directly on this wrapper.
    inner: indexmap::IndexSet<T>,
}
impl<T> PartialEq for IndexSet<T>
where
    T: Hash + Eq,
{
    /// Delegates to [`indexmap::IndexSet`] equality, which compares the
    /// contained elements irrespective of their order.
    fn eq(&self, other: &Self) -> bool {
        self.inner.eq(&other.inner)
    }
}
impl<T: Hash + Eq + PartialEq> IndexSet<T> {
    /// Create an empty set.
    pub fn new() -> Self {
        Self {
            inner: Default::default(),
        }
    }
    /// Insert `value`, returning `true` if it was not present before.
    pub fn insert(&mut self, value: T) -> bool {
        self.inner.insert(value)
    }
    /// Remove a random element from the set.
    pub fn remove_random<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Option<T> {
        let idx = self.pick_random_index(rng)?;
        // `shift_remove_index` preserves the order of the remaining elements.
        self.inner.shift_remove_index(idx)
    }
    /// Pick a random element from the set.
    pub fn pick_random<R: Rng + ?Sized>(&self, rng: &mut R) -> Option<&T> {
        let idx = self.pick_random_index(rng)?;
        self.inner.get_index(idx)
    }
    /// Pick a random element from the set, but not any of the elements in `without`.
    pub fn pick_random_without<R: Rng + ?Sized>(&self, without: &[&T], rng: &mut R) -> Option<&T> {
        self.inner
            .iter()
            .filter(|candidate| !without.contains(candidate))
            .choose(rng)
    }
    /// Pick a random index for an element in the set.
    pub fn pick_random_index<R: Rng + ?Sized>(&self, rng: &mut R) -> Option<usize> {
        match self.inner.len() {
            0 => None,
            len => Some(rng.random_range(0..len)),
        }
    }
    /// Remove an element from the set.
    ///
    /// NOTE: removal swaps the element with the last one and pops it off, so
    /// **the last element changes position**.
    pub fn remove(&mut self, value: &T) -> Option<T> {
        let (_idx, removed) = self.inner.swap_remove_full(value)?;
        Some(removed)
    }
    /// Remove an element from the set by its index.
    ///
    /// NOTE: removal swaps the element with the last one and pops it off, so
    /// **the last element changes position**.
    pub fn remove_index(&mut self, index: usize) -> Option<T> {
        self.inner.swap_remove_index(index)
    }
    /// Iterate in insertion order, skipping the single element `value`.
    pub fn iter_without<'a>(&'a self, value: &'a T) -> impl Iterator<Item = &'a T> {
        self.inner.iter().filter(move |candidate| *candidate != value)
    }
}
impl<T> IndexSet<T>
where
    T: Hash + Eq + Clone,
{
    /// Clone all elements into a vector and shuffle it.
    pub fn shuffled<R: Rng + ?Sized>(&self, rng: &mut R) -> Vec<T> {
        let mut items = Vec::from_iter(self.inner.iter().cloned());
        items.shuffle(rng);
        items
    }
    /// Like [`Self::shuffled`], but keep only the first `len` elements
    /// after shuffling.
    pub fn shuffled_and_capped<R: Rng + ?Sized>(&self, len: usize, rng: &mut R) -> Vec<T> {
        let mut shuffled = self.shuffled(rng);
        shuffled.truncate(len);
        shuffled
    }
    /// Clone all elements except those in `without` into a vector and
    /// shuffle it.
    pub fn shuffled_without<R: Rng + ?Sized>(&self, without: &[&T], rng: &mut R) -> Vec<T> {
        let mut items: Vec<T> = self
            .inner
            .iter()
            .filter(|candidate| !without.contains(candidate))
            .cloned()
            .collect();
        items.shuffle(rng);
        items
    }
    /// Like [`Self::shuffled_without`], but keep only the first `len`
    /// elements after shuffling.
    pub fn shuffled_without_and_capped<R: Rng + ?Sized>(
        &self,
        without: &[&T],
        len: usize,
        rng: &mut R,
    ) -> Vec<T> {
        let mut shuffled = self.shuffled_without(without, rng);
        shuffled.truncate(len);
        shuffled
    }
}
/// Consuming iteration delegates to the inner [`indexmap::IndexSet`],
/// yielding elements in the set's current order.
impl<T> IntoIterator for IndexSet<T> {
    type Item = T;
    type IntoIter = <indexmap::IndexSet<T> as IntoIterator>::IntoIter;
    fn into_iter(self) -> Self::IntoIter {
        self.inner.into_iter()
    }
}
impl<T> FromIterator<T> for IndexSet<T>
where
T: Hash + Eq,
{
fn from_iter<I: IntoIterator<Item = T>>(iterable: I) -> Self {
IndexSet {
inner: indexmap::IndexSet::from_iter(iterable),
}
}
}
/// A [`BinaryHeap`] with entries sorted by [`Instant`]. Allows to process expired items.
///
/// Entries that share the same instant are yielded in insertion (FIFO) order,
/// enforced by a per-map sequence number.
#[derive(Debug)]
pub struct TimerMap<T> {
    // Max-heap over `TimerMapEntry`; the entry `Ord` is reversed so the
    // earliest entry sits at the top of the heap.
    heap: BinaryHeap<TimerMapEntry<T>>,
    // Monotonic insertion counter used to break ties between equal instants.
    seq: u64,
}
// Can't derive default because we don't want a `T: Default` bound.
impl<T> Default for TimerMap<T> {
fn default() -> Self {
Self {
heap: Default::default(),
seq: 0,
}
}
}
impl<T> TimerMap<T> {
    /// Create a new, empty TimerMap.
    pub fn new() -> Self {
        Self::default()
    }
    /// Insert a new entry that fires at `instant`.
    ///
    /// Entries inserted at the same instant are later popped in insertion
    /// order, tracked via the monotonically increasing sequence number.
    pub fn insert(&mut self, instant: Instant, item: T) {
        let entry = TimerMapEntry {
            time: instant,
            seq: self.seq,
            item,
        };
        self.seq += 1;
        self.heap.push(entry);
    }
    /// Remove and return all entries scheduled at or before `from`.
    pub fn drain_until(
        &mut self,
        from: &Instant,
    ) -> impl Iterator<Item = (Instant, T)> + '_ + use<'_, T> {
        let limit = *from;
        std::iter::from_fn(move || self.pop_before(limit))
    }
    /// Pop the earliest entry, if it is scheduled at or before `limit`.
    pub fn pop_before(&mut self, limit: Instant) -> Option<(Instant, T)> {
        if self.heap.peek()?.time > limit {
            return None;
        }
        self.heap.pop().map(|entry| (entry.time, entry.item))
    }
    /// Get a reference to the instant of the earliest entry, if any.
    pub fn first(&self) -> Option<&Instant> {
        self.heap.peek().map(|entry| &entry.time)
    }
    #[cfg(test)]
    fn to_vec(&self) -> Vec<(Instant, T)>
    where
        T: Clone,
    {
        // `into_sorted_vec` sorts ascending by the (reversed) entry `Ord`,
        // i.e. latest-first, so reverse to get chronological order.
        let mut entries = self.heap.clone().into_sorted_vec();
        entries.reverse();
        entries.into_iter().map(|e| (e.time, e.item)).collect()
    }
}
/// A single entry in a [`TimerMap`]: the scheduled time, an insertion
/// sequence number (FIFO tie-break for equal times), and the payload.
#[derive(Debug, Clone)]
struct TimerMapEntry<T> {
    time: Instant,
    seq: u64,
    item: T,
}
impl<T> PartialEq for TimerMapEntry<T> {
    // Equality deliberately ignores `item`: an entry is identified by its
    // (time, seq) pair, and `seq` is unique within a map.
    fn eq(&self, other: &Self) -> bool {
        (self.time, self.seq) == (other.time, other.seq)
    }
}
impl<T> Eq for TimerMapEntry<T> {}
impl<T> PartialOrd for TimerMapEntry<T> {
    // Canonical impl: delegate to `Ord` so the two orderings cannot diverge.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl<T> Ord for TimerMapEntry<T> {
    // `BinaryHeap` is a max-heap, so the ordering is inverted (operands
    // swapped): the entry with the earliest time — and, on ties, the lowest
    // sequence number — compares greatest and is popped first.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        other
            .time
            .cmp(&self.time)
            .then_with(|| other.seq.cmp(&self.seq))
    }
}
/// A hash map where entries expire after a time
///
/// Expiration is lazy: entries are only removed when [`TimeBoundCache::expire_until`]
/// is called.
#[derive(Debug)]
pub struct TimeBoundCache<K, V> {
    // Key -> (expiry instant, value).
    map: HashMap<K, (Instant, V)>,
    // Expiry queue. May contain stale entries for keys that were re-inserted
    // with a different expiry; those are filtered out in `expire_until`.
    expiry: TimerMap<K>,
}
impl<K, V> Default for TimeBoundCache<K, V> {
fn default() -> Self {
Self {
map: Default::default(),
expiry: Default::default(),
}
}
}
impl<K: Hash + Eq + Clone, V> TimeBoundCache<K, V> {
    /// Insert an item into the cache, marked with an expiration time.
    ///
    /// Re-inserting an existing key overwrites its value and expiry; the old
    /// expiry entry stays queued and is skipped when it fires.
    pub fn insert(&mut self, key: K, value: V, expires: Instant) {
        self.expiry.insert(expires, key.clone());
        self.map.insert(key, (expires, value));
    }
    /// Returns `true` if the map contains a value for the specified key.
    pub fn contains_key(&self, key: &K) -> bool {
        self.map.contains_key(key)
    }
    /// Get the number of entries in the cache.
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// Returns `true` if the map contains no elements.
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
    /// Get an item from the cache.
    ///
    /// Note: does not check expiry; entries only disappear via
    /// [`Self::expire_until`].
    pub fn get(&self, key: &K) -> Option<&V> {
        let (_expires, value) = self.map.get(key)?;
        Some(value)
    }
    /// Get the expiration time for an item.
    pub fn expires(&self, key: &K) -> Option<&Instant> {
        let (expires, _value) = self.map.get(key)?;
        Some(expires)
    }
    /// Iterate over all items in the cache as `(key, value, expires)`.
    pub fn iter(&self) -> impl Iterator<Item = (&K, &V, &Instant)> {
        self.map
            .iter()
            .map(|(key, (expires, value))| (key, value, expires))
    }
    /// Remove all entries with an expiry instant lower or equal to `instant`.
    ///
    /// Returns the number of items that were removed.
    pub fn expire_until(&mut self, instant: Instant) -> usize {
        let mut removed = 0;
        while let Some((time, key)) = self.expiry.pop_before(instant) {
            if let hash_map::Entry::Occupied(entry) = self.map.entry(key) {
                // Only remove when the stored expiry matches this timer entry.
                // A mismatch means the key was re-inserted with a different
                // expiry, so this timer entry is stale. A vacant entry means
                // the key already fired via an earlier duplicate timer entry.
                if entry.get().0 == time {
                    entry.remove();
                    removed += 1;
                }
            }
        }
        removed
    }
}
#[cfg(test)]
mod test {
    use std::str::FromStr;
    use n0_future::time::{Duration, Instant};
    use rand::SeedableRng;
    use super::{IndexSet, TimeBoundCache, TimerMap};
    // Deterministic RNG with a fixed seed so shuffle/pick assertions below
    // are stable across runs.
    fn test_rng() -> rand_chacha::ChaCha12Rng {
        rand_chacha::ChaCha12Rng::seed_from_u64(42)
    }
    #[test]
    fn indexset() {
        let elems = [1, 2, 3, 4];
        let set = IndexSet::from_iter(elems);
        // The exact orders asserted here are fixed by the seeded RNG.
        let x = set.shuffled(&mut test_rng());
        assert_eq!(x, vec![2, 1, 4, 3]);
        let x = set.shuffled_and_capped(2, &mut test_rng());
        assert_eq!(x, vec![2, 1]);
        let x = set.shuffled_without(&[&1], &mut test_rng());
        assert_eq!(x, vec![3, 2, 4]);
        let x = set.shuffled_without_and_capped(&[&1], 2, &mut test_rng());
        assert_eq!(x, vec![3, 2]);
        // recreate the rng - otherwise we get failures on some architectures when cross-compiling,
        // likely due to usize differences pulling different amounts of randomness.
        let x = set.pick_random(&mut test_rng());
        assert_eq!(x, Some(&1));
        let x = set.pick_random_without(&[&3], &mut test_rng());
        assert_eq!(x, Some(&4));
        let mut set = set;
        set.remove_random(&mut test_rng());
        assert_eq!(set, IndexSet::from_iter([2, 3, 4]));
    }
    #[test]
    fn timer_map() {
        let mut map = TimerMap::new();
        let now = Instant::now();
        let times = [
            now - Duration::from_secs(1),
            now,
            now + Duration::from_secs(1),
            now + Duration::from_secs(2),
        ];
        map.insert(times[0], -1);
        map.insert(times[0], -2);
        map.insert(times[1], 0);
        map.insert(times[2], 1);
        map.insert(times[3], 2);
        map.insert(times[3], 3);
        // Entries at equal instants (times[0] and times[3]) keep insertion order.
        assert_eq!(
            map.to_vec(),
            vec![
                (times[0], -1),
                (times[0], -2),
                (times[1], 0),
                (times[2], 1),
                (times[3], 2),
                (times[3], 3)
            ]
        );
        assert_eq!(map.first(), Some(&times[0]));
        // Draining up to `now` takes everything scheduled at or before `now`.
        let drain = map.drain_until(&now);
        assert_eq!(
            drain.collect::<Vec<_>>(),
            vec![(times[0], -1), (times[0], -2), (times[1], 0),]
        );
        assert_eq!(
            map.to_vec(),
            vec![(times[2], 1), (times[3], 2), (times[3], 3)]
        );
        // A second drain with the same limit yields nothing.
        let drain = map.drain_until(&now);
        assert_eq!(drain.collect::<Vec<_>>(), vec![]);
        let drain = map.drain_until(&(now + Duration::from_secs(10)));
        assert_eq!(
            drain.collect::<Vec<_>>(),
            vec![(times[2], 1), (times[3], 2), (times[3], 3)]
        );
    }
    #[test]
    fn hex() {
        #[derive(Eq, PartialEq)]
        struct Id([u8; 32]);
        idbytes_impls!(Id, "Id");
        let id: Id = [1u8; 32].into();
        // FromStr is the inverse of Display.
        assert_eq!(id, Id::from_str(&format!("{id}")).unwrap());
        assert_eq!(
            &format!("{id}"),
            "0101010101010101010101010101010101010101010101010101010101010101"
        );
        assert_eq!(
            &format!("{id:?}"),
            "Id(0101010101010101010101010101010101010101010101010101010101010101)"
        );
        assert_eq!(id.as_bytes(), &[1u8; 32]);
    }
    #[test]
    fn time_bound_cache() {
        let mut cache = TimeBoundCache::default();
        let t0 = Instant::now();
        let t1 = t0 + Duration::from_secs(1);
        let t2 = t0 + Duration::from_secs(2);
        cache.insert(1, 10, t0);
        cache.insert(2, 20, t1);
        cache.insert(3, 30, t1);
        cache.insert(4, 40, t2);
        assert_eq!(cache.get(&2), Some(&20));
        assert_eq!(cache.len(), 4);
        // Expiring at t1 removes everything scheduled at or before t1.
        let removed = cache.expire_until(t1);
        assert_eq!(removed, 3);
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.get(&2), None);
        assert_eq!(cache.get(&4), Some(&40));
        let t3 = t2 + Duration::from_secs(1);
        // Re-inserting key 5 with a later expiry must override the earlier one:
        // the stale t2 timer entry must not evict it.
        cache.insert(5, 50, t2);
        assert_eq!(cache.expires(&5), Some(&t2));
        cache.insert(5, 50, t3);
        assert_eq!(cache.expires(&5), Some(&t3));
        cache.expire_until(t2);
        assert_eq!(cache.get(&4), None);
        assert_eq!(cache.get(&5), Some(&50));
    }
}

View file

@ -0,0 +1,134 @@
//! Tests that use the [`iroh_gossip::proto::sim::Simulator`].
use std::{env, fmt, str::FromStr, time::Duration};
use iroh_gossip::proto::{
sim::{BootstrapMode, LatencyConfig, NetworkConfig, Simulator, SimulatorConfig},
Config,
};
#[test]
fn big_hyparview() {
    tracing_subscriber::fmt::try_init().ok();
    // Shorten the shuffle interval so the membership views mix quickly.
    let mut proto = Config::default();
    proto.membership.shuffle_interval = Duration::from_secs(5);
    let mut simulator = Simulator::new(
        SimulatorConfig::from_env(),
        NetworkConfig {
            proto,
            latency: LatencyConfig::default(),
        },
    );
    simulator.bootstrap(BootstrapMode::default());
    let state = simulator.report();
    println!("{state}");
    // After bootstrap, no peer may be left without neighbors.
    assert!(!state.has_peers_with_no_neighbors());
}
#[test]
fn big_multiple_sender() {
    tracing_subscriber::fmt::try_init().ok();
    let mut simulator = Simulator::new(SimulatorConfig::from_env(), NetworkConfig::default());
    simulator.bootstrap(BootstrapMode::default());
    let rounds = read_var("ROUNDS", 30);
    // Each round a different, randomly chosen peer broadcasts one message.
    for i in 0..rounds {
        let sender = simulator.random_peer();
        simulator.gossip_round(vec![(sender, format!("m{i}").into_bytes().into())]);
    }
    let avg = simulator.round_stats_average().mean;
    println!(
        "average with {} peers after {} rounds:\n{}",
        simulator.peer_count(),
        rounds,
        avg
    );
    println!("{}", simulator.report());
    // Sanity bounds on last-delivery hop, relative message redundancy, and
    // delivery completeness.
    assert!(avg.ldh < 18.);
    assert!(avg.rmr < 1.);
    assert_eq!(avg.missed, 0.0);
}
#[test]
fn big_single_sender() {
    tracing_subscriber::fmt::try_init().ok();
    let rounds = read_var("ROUNDS", 30);
    let mut simulator = Simulator::new(SimulatorConfig::from_env(), NetworkConfig::default());
    simulator.bootstrap(BootstrapMode::default());
    // One fixed peer broadcasts a single message per round.
    let from = simulator.random_peer();
    for i in 0..rounds {
        simulator.gossip_round(vec![(from, format!("m{i}").into_bytes().into())]);
    }
    let avg = simulator.round_stats_average().mean;
    println!(
        "average with {} peers after {} rounds:\n{}",
        simulator.peer_count(),
        rounds,
        avg
    );
    println!("{}", simulator.report());
    // Sanity bounds on last-delivery hop, relative message redundancy, and
    // delivery completeness.
    assert!(avg.ldh < 15.);
    assert!(avg.rmr < 0.2);
    assert_eq!(avg.missed, 0.0);
}
#[test]
fn big_burst() {
    tracing_subscriber::fmt::try_init().ok();
    let rounds = read_var("ROUNDS", 5);
    let messages_per_peer = read_var("MESSAGES_PER_PEER", 1);
    let mut simulator = Simulator::new(SimulatorConfig::from_env(), NetworkConfig::default());
    simulator.bootstrap(BootstrapMode::default());
    // Every round, every peer broadcasts `messages_per_peer` messages at once.
    for i in 0..rounds {
        let mut messages = Vec::new();
        for id in simulator.network.peer_ids() {
            for j in 0..messages_per_peer {
                let message: bytes::Bytes = format!("{i}:{j}.{id}").into_bytes().into();
                messages.push((id, message));
            }
        }
        simulator.gossip_round(messages);
    }
    let avg = simulator.round_stats_average().mean;
    println!(
        "average with {} peers after {} rounds:\n{}",
        simulator.peer_count(),
        rounds,
        avg
    );
    println!("{}", simulator.report());
    // Looser bounds than the single/multi-sender tests: a burst causes more
    // redundant deliveries.
    assert!(avg.ldh < 30.);
    assert!(avg.rmr < 3.);
    assert_eq!(avg.missed, 0.0);
}
/// Read the environment variable `name` and parse it into `T`, falling back
/// to `default` when the variable is unset (or not valid unicode).
///
/// # Panics
///
/// Panics if the variable is set but cannot be parsed into `T`. The panic
/// message includes the raw value and the underlying parse error (previously
/// the error was silently discarded despite the `Display` bound).
fn read_var<T: FromStr<Err: fmt::Display + fmt::Debug>>(name: &str, default: T) -> T {
    match env::var(name) {
        Ok(raw) => raw.parse().unwrap_or_else(|err| {
            panic!("Failed to parse environment variable {name}={raw}: {err}")
        }),
        Err(_) => default,
    }
}