Mirror of https://github.com/nestriness/warp.git (synced 2025-12-15 03:15:41 +02:00)
Commit: "This works"
moq-transport/Cargo.lock (generated, new file, 1315 lines)
File diff suppressed because it is too large.
moq-transport/Cargo.toml (new file, 52 lines)
@@ -0,0 +1,52 @@
[package]
name = "moq-transport"
description = "Media over QUIC"
authors = ["Luke Curley"]
repository = "https://github.com/kixelated/moq-rs"
license = "MIT OR Apache-2.0"

version = "0.2.0"
edition = "2021"

keywords = ["quic", "http3", "webtransport", "media", "live"]
categories = ["multimedia", "network-programming", "web-programming"]


# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
bytes = "1"
thiserror = "1"
tokio = { version = "1", features = ["macros", "io-util", "sync"] }
log = "0.4"
indexmap = "2"

quinn = "0.10"
webtransport-quinn = "0.6.1"
#webtransport-quinn = { path = "../../webtransport-rs/webtransport-quinn" }

async-trait = "0.1"
paste = "1"

[dev-dependencies]
# QUIC
url = "2"

# Crypto
rustls = { version = "0.21", features = ["dangerous_configuration"] }
rustls-native-certs = "0.6"
rustls-pemfile = "1"

# Async stuff
tokio = { version = "1", features = ["full"] }

# CLI, logging, error handling
clap = { version = "4", features = ["derive"] }
log = { version = "0.4", features = ["std"] }
env_logger = "0.9"
mp4 = "0.13"
anyhow = { version = "1", features = ["backtrace"] }
serde_json = "1"
rfc6381-codec = "0.1"
tracing = "0.1"
tracing-subscriber = "0.3"
moq-transport/README.md (new file, 10 lines)
@@ -0,0 +1,10 @@
[](https://docs.rs/moq-transport/)
[](https://crates.io/crates/moq-transport)
[](LICENSE-MIT)

# moq-transport

A Rust implementation of the proposed IETF standard.

[Specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/)
[Github](https://github.com/moq-wg/moq-transport)
moq-transport/src/cache/broadcast.rs (vendored, new file, 262 lines)
@@ -0,0 +1,262 @@
//! A broadcast is a collection of tracks, split into two handles: [Publisher] and [Subscriber].
//!
//! The [Publisher] can create tracks, either manually or on request.
//! It receives all requests by a [Subscriber] for tracks that don't exist.
//! The simplest implementation is to close every unknown track with [CacheError::NotFound].
//!
//! A [Subscriber] can request tracks by name.
//! If the track already exists, it will be returned.
//! If the track doesn't exist, it will be sent to [Unknown] to be handled.
//! A [Subscriber] can be cloned to create multiple subscriptions.
//!
//! The broadcast is automatically closed with [CacheError::Closed] when [Publisher] is dropped, or all [Subscriber]s are dropped.
use std::{
	collections::{hash_map, HashMap, VecDeque},
	fmt,
	ops::Deref,
	sync::Arc,
};

use super::{track, CacheError, Watch};

/// Create a new broadcast.
pub fn new(id: &str) -> (Publisher, Subscriber) {
	let state = Watch::new(State::default());
	let info = Arc::new(Info { id: id.to_string() });

	let publisher = Publisher::new(state.clone(), info.clone());
	let subscriber = Subscriber::new(state, info);

	(publisher, subscriber)
}

/// Static information about a broadcast.
#[derive(Debug)]
pub struct Info {
	pub id: String,
}

/// Dynamic information about the broadcast.
#[derive(Debug)]
struct State {
	tracks: HashMap<String, track::Subscriber>,
	requested: VecDeque<track::Publisher>,
	closed: Result<(), CacheError>,
}

impl State {
	pub fn get(&self, name: &str) -> Result<Option<track::Subscriber>, CacheError> {
		// Don't check closed, so we can return from cache.
		Ok(self.tracks.get(name).cloned())
	}

	pub fn insert(&mut self, track: track::Subscriber) -> Result<(), CacheError> {
		self.closed.clone()?;

		match self.tracks.entry(track.name.clone()) {
			hash_map::Entry::Occupied(_) => return Err(CacheError::Duplicate),
			hash_map::Entry::Vacant(v) => v.insert(track),
		};

		Ok(())
	}

	pub fn request(&mut self, name: &str) -> Result<track::Subscriber, CacheError> {
		self.closed.clone()?;

		// Create a new track.
		let (publisher, subscriber) = track::new(name);

		// Insert the track into our Map so we deduplicate future requests.
		self.tracks.insert(name.to_string(), subscriber.clone());

		// Send the track to the Publisher to handle.
		self.requested.push_back(publisher);

		Ok(subscriber)
	}

	pub fn has_next(&self) -> Result<bool, CacheError> {
		// Check if there's any elements in the queue before checking closed.
		if !self.requested.is_empty() {
			return Ok(true);
		}

		self.closed.clone()?;
		Ok(false)
	}

	pub fn next(&mut self) -> track::Publisher {
		// We panic instead of erroring to avoid a nasty wakeup loop if you don't call has_next first.
		self.requested.pop_front().expect("no entry in queue")
	}

	pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> {
		self.closed.clone()?;
		self.closed = Err(err);
		Ok(())
	}
}

impl Default for State {
	fn default() -> Self {
		Self {
			tracks: HashMap::new(),
			closed: Ok(()),
			requested: VecDeque::new(),
		}
	}
}

/// Publish new tracks for a broadcast by name.
// TODO remove Clone
#[derive(Clone)]
pub struct Publisher {
	state: Watch<State>,
	info: Arc<Info>,
	_dropped: Arc<Dropped>,
}

impl Publisher {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));
		Self { state, info, _dropped }
	}

	/// Create a new track with the given name, inserting it into the broadcast.
	pub fn create_track(&mut self, name: &str) -> Result<track::Publisher, CacheError> {
		let (publisher, subscriber) = track::new(name);
		self.state.lock_mut().insert(subscriber)?;
		Ok(publisher)
	}

	/// Insert a track into the broadcast.
	pub fn insert_track(&mut self, track: track::Subscriber) -> Result<(), CacheError> {
		self.state.lock_mut().insert(track)
	}

	/// Block until the next track requested by a subscriber.
	pub async fn next_track(&mut self) -> Result<track::Publisher, CacheError> {
		loop {
			let notify = {
				let state = self.state.lock();
				if state.has_next()? {
					return Ok(state.into_mut().next());
				}

				state.changed()
			};

			notify.await;
		}
	}

	/// Close the broadcast with an error.
	pub fn close(self, err: CacheError) -> Result<(), CacheError> {
		self.state.lock_mut().close(err)
	}
}

impl Deref for Publisher {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Publisher {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Publisher")
			.field("state", &self.state)
			.field("info", &self.info)
			.finish()
	}
}

/// Subscribe to a broadcast by requesting tracks.
///
/// This can be cloned to create handles.
#[derive(Clone)]
pub struct Subscriber {
	state: Watch<State>,
	info: Arc<Info>,
	_dropped: Arc<Dropped>,
}

impl Subscriber {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));
		Self { state, info, _dropped }
	}

	/// Get a track from the broadcast by name.
	/// If the track does not exist, it will be created and potentially fulfilled by the publisher (via Unknown).
	/// Otherwise, it will return [CacheError::NotFound].
	pub fn get_track(&self, name: &str) -> Result<track::Subscriber, CacheError> {
		let state = self.state.lock();
		if let Some(track) = state.get(name)? {
			return Ok(track);
		}

		// Request a new track if it does not exist.
		state.into_mut().request(name)
	}

	/// Check if the broadcast is closed, either because the publisher was dropped or called [Publisher::close].
	pub fn is_closed(&self) -> Option<CacheError> {
		self.state.lock().closed.as_ref().err().cloned()
	}

	/// Wait until the broadcast is closed, either because the publisher was dropped or called [Publisher::close].
	pub async fn closed(&self) -> CacheError {
		loop {
			let notify = {
				let state = self.state.lock();
				if let Some(err) = state.closed.as_ref().err() {
					return err.clone();
				}

				state.changed()
			};

			notify.await;
		}
	}
}

impl Deref for Subscriber {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Subscriber {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Subscriber")
			.field("state", &self.state)
			.field("info", &self.info)
			.finish()
	}
}

// A handle that closes the broadcast when dropped:
// - when all Subscribers are dropped or
// - when Publisher and Unknown are dropped.
struct Dropped {
	state: Watch<State>,
}

impl Dropped {
	fn new(state: Watch<State>) -> Self {
		Self { state }
	}
}

impl Drop for Dropped {
	fn drop(&mut self) {
		self.state.lock_mut().close(CacheError::Closed).ok();
	}
}
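The Publisher/Subscriber split above is easiest to see end to end. The following is a minimal sketch rather than code from this commit: it assumes the crate is used as `moq_transport`, that both halves come from `broadcast::new("id")`, and that it runs inside a tokio task. The publisher serves one known track and rejects everything else with CacheError::NotFound, exactly the "simplest implementation" the module docs describe.

use moq_transport::cache::{broadcast, CacheError};

async fn serve(mut publisher: broadcast::Publisher) -> Result<(), CacheError> {
	// Create a track up front; subscribers asking for "video" get it from the cache.
	let _video = publisher.create_track("video")?;

	// Every request for a track we didn't create shows up here; reject it.
	while let Ok(unknown) = publisher.next_track().await {
		unknown.close(CacheError::NotFound)?;
	}

	Ok(())
}

async fn watch(subscriber: broadcast::Subscriber) -> Result<(), CacheError> {
	// Returns the cached track, or queues a request for the publisher to handle.
	let _track = subscriber.get_track("video")?;
	Ok(())
}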
moq-transport/src/cache/error.rs (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
use thiserror::Error;

use crate::MoqError;

#[derive(Clone, Debug, Error)]
pub enum CacheError {
	/// A clean termination, represented as error code 0.
	/// This error is automatically used when publishers or subscribers are dropped without calling close.
	#[error("closed")]
	Closed,

	/// An ANNOUNCE_RESET or SUBSCRIBE_RESET was sent by the publisher.
	#[error("reset code={0:?}")]
	Reset(u32),

	/// An ANNOUNCE_STOP or SUBSCRIBE_STOP was sent by the subscriber.
	#[error("stop")]
	Stop,

	/// The requested resource was not found.
	#[error("not found")]
	NotFound,

	/// A resource already exists with that ID.
	#[error("duplicate")]
	Duplicate,
}

impl MoqError for CacheError {
	/// An integer code that is sent over the wire.
	fn code(&self) -> u32 {
		match self {
			Self::Closed => 0,
			Self::Reset(code) => *code,
			Self::Stop => 206,
			Self::NotFound => 404,
			Self::Duplicate => 409,
		}
	}

	/// A reason that is sent over the wire.
	fn reason(&self) -> String {
		match self {
			Self::Closed => "closed".to_owned(),
			Self::Reset(code) => format!("reset code: {}", code),
			Self::Stop => "stop".to_owned(),
			Self::NotFound => "not found".to_owned(),
			Self::Duplicate => "duplicate".to_owned(),
		}
	}
}
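As a quick illustration (not part of this commit) of how the MoqError impl above is meant to be consumed, the code/reason pair is what a session would put on the wire:

use moq_transport::{cache::CacheError, MoqError};

fn describe(err: &CacheError) -> String {
	// e.g. CacheError::NotFound => "404: not found", CacheError::Reset(7) => "7: reset code: 7"
	format!("{}: {}", err.code(), err.reason())
}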
moq-transport/src/cache/fragment.rs (vendored, new file, 208 lines)
@@ -0,0 +1,208 @@
//! A fragment is a stream of bytes with a header, split into a [Publisher] and [Subscriber] handle.
//!
//! A [Publisher] writes an ordered stream of bytes in chunks.
//! There's no framing, so these chunks can be of any size or position, and won't be maintained over the network.
//!
//! A [Subscriber] reads an ordered stream of bytes in chunks.
//! These chunks are returned directly from the QUIC connection, so they may be of any size or position.
//! You can clone the [Subscriber] and each will read a copy of all future chunks. (fanout)
//!
//! The fragment is closed with [CacheError::Closed] when all publishers or subscribers are dropped.
use core::fmt;
use std::{ops::Deref, sync::Arc};

use crate::VarInt;
use bytes::Bytes;

use super::{CacheError, Watch};

/// Create a new fragment with the given info.
pub fn new(info: Info) -> (Publisher, Subscriber) {
	let state = Watch::new(State::default());
	let info = Arc::new(info);

	let publisher = Publisher::new(state.clone(), info.clone());
	let subscriber = Subscriber::new(state, info);

	(publisher, subscriber)
}

/// Static information about the fragment.
#[derive(Debug)]
pub struct Info {
	// The sequence number of the fragment within the segment.
	// NOTE: These may be received out of order or with gaps.
	pub sequence: VarInt,

	// The size of the fragment, optionally None if this is the last fragment in a segment.
	// TODO enforce this size.
	pub size: Option<usize>,
}

struct State {
	// The data that has been received thus far.
	chunks: Vec<Bytes>,

	// Set when the publisher is dropped.
	closed: Result<(), CacheError>,
}

impl State {
	pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> {
		self.closed.clone()?;
		self.closed = Err(err);
		Ok(())
	}
}

impl Default for State {
	fn default() -> Self {
		Self {
			chunks: Vec::new(),
			closed: Ok(()),
		}
	}
}

impl fmt::Debug for State {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		// We don't want to print out the contents, so summarize.
		f.debug_struct("State").field("closed", &self.closed).finish()
	}
}

/// Used to write data to a fragment and notify subscribers.
pub struct Publisher {
	// Mutable fragment state.
	state: Watch<State>,

	// Immutable fragment state.
	info: Arc<Info>,

	// Closes the fragment when all Publishers are dropped.
	_dropped: Arc<Dropped>,
}

impl Publisher {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));
		Self { state, info, _dropped }
	}

	/// Write a new chunk of bytes.
	pub fn chunk(&mut self, chunk: Bytes) -> Result<(), CacheError> {
		let mut state = self.state.lock_mut();
		state.closed.clone()?;
		state.chunks.push(chunk);
		Ok(())
	}

	/// Close the fragment with an error.
	pub fn close(self, err: CacheError) -> Result<(), CacheError> {
		self.state.lock_mut().close(err)
	}
}

impl Deref for Publisher {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Publisher {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Publisher")
			.field("state", &self.state)
			.field("info", &self.info)
			.finish()
	}
}

/// Notified when a fragment has new data available.
#[derive(Clone)]
pub struct Subscriber {
	// Modify the fragment state.
	state: Watch<State>,

	// Immutable fragment state.
	info: Arc<Info>,

	// The number of chunks that we've read.
	// NOTE: Cloned subscribers inherit this index, but then run in parallel.
	index: usize,

	// Dropped when all Subscribers are dropped.
	_dropped: Arc<Dropped>,
}

impl Subscriber {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));

		Self {
			state,
			info,
			index: 0,
			_dropped,
		}
	}

	/// Block until the next chunk of bytes is available.
	pub async fn chunk(&mut self) -> Result<Option<Bytes>, CacheError> {
		loop {
			let notify = {
				let state = self.state.lock();
				if self.index < state.chunks.len() {
					let chunk = state.chunks[self.index].clone();
					self.index += 1;
					return Ok(Some(chunk));
				}

				match &state.closed {
					Err(CacheError::Closed) => return Ok(None),
					Err(err) => return Err(err.clone()),
					Ok(()) => state.changed(),
				}
			};

			notify.await; // Try again when the state changes
		}
	}
}

impl Deref for Subscriber {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Subscriber {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Subscriber")
			.field("state", &self.state)
			.field("info", &self.info)
			.field("index", &self.index)
			.finish()
	}
}

struct Dropped {
	// Modify the fragment state.
	state: Watch<State>,
}

impl Dropped {
	fn new(state: Watch<State>) -> Self {
		Self { state }
	}
}

impl Drop for Dropped {
	fn drop(&mut self) {
		self.state.lock_mut().close(CacheError::Closed).ok();
	}
}
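A short sketch of the chunk/fanout behaviour described in the module docs above; it is not part of the commit and assumes a tokio context plus the `bytes` crate on the consumer side:

use bytes::Bytes;
use moq_transport::cache::{fragment, CacheError};
use moq_transport::VarInt;

async fn fanout() -> Result<(), CacheError> {
	let (mut publisher, subscriber) = fragment::new(fragment::Info {
		sequence: VarInt::ZERO,
		size: None,
	});

	// Chunk boundaries are arbitrary; only the byte order is preserved.
	publisher.chunk(Bytes::from_static(b"hello "))?;
	publisher.chunk(Bytes::from_static(b"world"))?;
	drop(publisher); // closes the fragment with CacheError::Closed

	// Each clone reads its own copy of every chunk (fanout).
	let mut reader = subscriber.clone();
	while let Some(chunk) = reader.chunk().await? {
		println!("read {} bytes", chunk.len());
	}

	Ok(())
}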
moq-transport/src/cache/mod.rs (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
//! Allows a publisher to push updates, automatically caching and fanning them out to any subscribers.
//!
//! The hierarchy is: [broadcast] -> [track] -> [segment] -> [fragment] -> [Bytes](bytes::Bytes)
//!
//! The naming scheme doesn't match the spec because it's more strict, and bikeshedding of course:
//!
//! - [broadcast] is kinda like "track namespace"
//! - [track] is "track"
//! - [segment] is "group" but MUST use a single stream.
//! - [fragment] is "object" but MUST have the same properties as the segment.

pub mod broadcast;
mod error;
pub mod fragment;
pub mod segment;
pub mod track;

pub(crate) mod watch;
pub(crate) use watch::*;

pub use error::*;
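The hierarchy in the module docs above maps directly onto the handles. This is a hypothetical publishing path, not code from this commit (names like "demo" and "audio" are chosen for illustration), walking broadcast -> track -> segment -> fragment -> Bytes:

use bytes::Bytes;
use moq_transport::cache::{broadcast, segment, CacheError};
use moq_transport::VarInt;

async fn publish_one_stream() -> Result<(), CacheError> {
	let (mut publisher, _subscriber) = broadcast::new("demo");

	// broadcast -> track
	let mut track = publisher.create_track("audio")?;

	// track -> segment (one QUIC stream's worth of data)
	let segment = track.create_segment(segment::Info {
		sequence: VarInt::ZERO,
		priority: 0,
		expires: None,
	})?;

	// segment -> fragment -> Bytes; the final fragment may have an unknown size.
	let mut fragment = segment.final_fragment(VarInt::ZERO)?;
	fragment.chunk(Bytes::from_static(b"payload"))?;

	Ok(())
}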
moq-transport/src/cache/segment.rs (vendored, new file, 226 lines)
@@ -0,0 +1,226 @@
//! A segment is a stream of fragments with a header, split into a [Publisher] and [Subscriber] handle.
//!
//! A [Publisher] writes an ordered stream of fragments.
//! Each fragment can have a sequence number, allowing the subscriber to detect gaps between fragments.
//!
//! A [Subscriber] reads an ordered stream of fragments.
//! The subscriber can be cloned, in which case each subscriber receives a copy of each fragment. (fanout)
//!
//! The segment is closed with [CacheError::Closed] when all publishers or subscribers are dropped.
use core::fmt;
use std::{ops::Deref, sync::Arc, time};

use crate::VarInt;

use super::{fragment, CacheError, Watch};

/// Create a new segment with the given info.
pub fn new(info: Info) -> (Publisher, Subscriber) {
	let state = Watch::new(State::default());
	let info = Arc::new(info);

	let publisher = Publisher::new(state.clone(), info.clone());
	let subscriber = Subscriber::new(state, info);

	(publisher, subscriber)
}

/// Static information about the segment.
#[derive(Debug)]
pub struct Info {
	// The sequence number of the segment within the track.
	// NOTE: These may be received out of order or with gaps.
	pub sequence: VarInt,

	// The priority of the segment within the BROADCAST.
	pub priority: u32,

	// Cache the segment for at most this long.
	pub expires: Option<time::Duration>,
}

struct State {
	// The data that has been received thus far.
	fragments: Vec<fragment::Subscriber>,

	// Set when the publisher is dropped.
	closed: Result<(), CacheError>,
}

impl State {
	pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> {
		self.closed.clone()?;
		self.closed = Err(err);
		Ok(())
	}
}

impl Default for State {
	fn default() -> Self {
		Self {
			fragments: Vec::new(),
			closed: Ok(()),
		}
	}
}

impl fmt::Debug for State {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("State")
			.field("fragments", &self.fragments)
			.field("closed", &self.closed)
			.finish()
	}
}

/// Used to write data to a segment and notify subscribers.
pub struct Publisher {
	// Mutable segment state.
	state: Watch<State>,

	// Immutable segment state.
	info: Arc<Info>,

	// Closes the segment when all Publishers are dropped.
	_dropped: Arc<Dropped>,
}

impl Publisher {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));
		Self { state, info, _dropped }
	}

	// Not public because it's a footgun.
	pub(crate) fn push_fragment(
		&mut self,
		sequence: VarInt,
		size: Option<usize>,
	) -> Result<fragment::Publisher, CacheError> {
		let (publisher, subscriber) = fragment::new(fragment::Info { sequence, size });

		let mut state = self.state.lock_mut();
		state.closed.clone()?;
		state.fragments.push(subscriber);
		Ok(publisher)
	}

	/// Write a fragment
	pub fn fragment(&mut self, sequence: VarInt, size: usize) -> Result<fragment::Publisher, CacheError> {
		self.push_fragment(sequence, Some(size))
	}

	/// Write the last fragment, which means size can be unknown.
	pub fn final_fragment(mut self, sequence: VarInt) -> Result<fragment::Publisher, CacheError> {
		self.push_fragment(sequence, None)
	}

	/// Close the segment with an error.
	pub fn close(self, err: CacheError) -> Result<(), CacheError> {
		self.state.lock_mut().close(err)
	}
}

impl Deref for Publisher {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Publisher {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Publisher")
			.field("state", &self.state)
			.field("info", &self.info)
			.finish()
	}
}

/// Notified when a segment has new data available.
#[derive(Clone)]
pub struct Subscriber {
	// Modify the segment state.
	state: Watch<State>,

	// Immutable segment state.
	info: Arc<Info>,

	// The number of fragments that we've read.
	// NOTE: Cloned subscribers inherit this index, but then run in parallel.
	index: usize,

	// Dropped when all Subscribers are dropped.
	_dropped: Arc<Dropped>,
}

impl Subscriber {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));

		Self {
			state,
			info,
			index: 0,
			_dropped,
		}
	}

	/// Block until the next fragment is available.
	pub async fn fragment(&mut self) -> Result<Option<fragment::Subscriber>, CacheError> {
		loop {
			let notify = {
				let state = self.state.lock();
				if self.index < state.fragments.len() {
					let fragment = state.fragments[self.index].clone();
					self.index += 1;
					return Ok(Some(fragment));
				}

				match &state.closed {
					Err(CacheError::Closed) => return Ok(None),
					Err(err) => return Err(err.clone()),
					Ok(()) => state.changed(),
				}
			};

			notify.await; // Try again when the state changes
		}
	}
}

impl Deref for Subscriber {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Subscriber {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Subscriber")
			.field("state", &self.state)
			.field("info", &self.info)
			.field("index", &self.index)
			.finish()
	}
}

struct Dropped {
	// Modify the segment state.
	state: Watch<State>,
}

impl Dropped {
	fn new(state: Watch<State>) -> Self {
		Self { state }
	}
}

impl Drop for Dropped {
	fn drop(&mut self) {
		self.state.lock_mut().close(CacheError::Closed).ok();
	}
}
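On the consuming side the same pattern applies one level up. A sketch (again not part of the commit, assuming a tokio context) that drains a segment until the publisher drops it and Ok(None) is returned:

use moq_transport::cache::{segment, CacheError};

async fn read_segment(mut segment: segment::Subscriber) -> Result<usize, CacheError> {
	let mut total = 0;

	// Ok(None) means the segment was closed cleanly with CacheError::Closed.
	while let Some(mut fragment) = segment.fragment().await? {
		while let Some(chunk) = fragment.chunk().await? {
			total += chunk.len();
		}
	}

	Ok(total)
}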
moq-transport/src/cache/track.rs (vendored, new file, 337 lines)
@@ -0,0 +1,337 @@
//! A track is a collection of semi-reliable and semi-ordered segments, split into a [Publisher] and [Subscriber] handle.
//!
//! A [Publisher] creates segments with a sequence number and priority.
//! The sequence number is used to determine the order of segments, while the priority is used to determine which segment to transmit first.
//! This may seem counter-intuitive, but is designed for live streaming where the newest segments may be higher priority.
//! A cloned [Publisher] can be used to create segments in parallel, but will error if a duplicate sequence number is used.
//!
//! A [Subscriber] may not receive all segments in order or at all.
//! These segments are meant to be transmitted over congested networks and the key to MoQ Transport is to not block on them.
//! Segments will be cached for a potentially limited duration, adding to the unreliable nature.
//! A cloned [Subscriber] will receive a copy of all new segments going forward (fanout).
//!
//! The track is closed with [CacheError::Closed] when all publishers or subscribers are dropped.

use std::{collections::BinaryHeap, fmt, ops::Deref, sync::Arc, time};

use indexmap::IndexMap;

use super::{segment, CacheError, Watch};
use crate::VarInt;

/// Create a track with the given name.
pub fn new(name: &str) -> (Publisher, Subscriber) {
	let state = Watch::new(State::default());
	let info = Arc::new(Info { name: name.to_string() });

	let publisher = Publisher::new(state.clone(), info.clone());
	let subscriber = Subscriber::new(state, info);

	(publisher, subscriber)
}

/// Static information about a track.
#[derive(Debug)]
pub struct Info {
	pub name: String,
}

struct State {
	// Store segments in received order so subscribers can detect changes.
	// The key is the segment sequence, which could have gaps.
	// A None value means the segment has expired.
	lookup: IndexMap<VarInt, Option<segment::Subscriber>>,

	// Store when segments will expire in a priority queue.
	expires: BinaryHeap<SegmentExpiration>,

	// The number of None entries removed from the start of the lookup.
	pruned: usize,

	// Set when the publisher is closed/dropped, or all subscribers are dropped.
	closed: Result<(), CacheError>,
}

impl State {
	pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> {
		self.closed.clone()?;
		self.closed = Err(err);
		Ok(())
	}

	pub fn insert(&mut self, segment: segment::Subscriber) -> Result<(), CacheError> {
		self.closed.clone()?;

		let entry = match self.lookup.entry(segment.sequence) {
			indexmap::map::Entry::Occupied(_entry) => return Err(CacheError::Duplicate),
			indexmap::map::Entry::Vacant(entry) => entry,
		};

		if let Some(expires) = segment.expires {
			self.expires.push(SegmentExpiration {
				sequence: segment.sequence,
				expires: time::Instant::now() + expires,
			});
		}

		entry.insert(Some(segment));

		// Expire any existing segments on insert.
		// This means if you don't insert then you won't expire... but it's probably fine since the cache won't grow.
		// TODO Use a timer to expire segments at the correct time instead
		self.expire();

		Ok(())
	}

	// Try expiring any segments
	pub fn expire(&mut self) {
		let now = time::Instant::now();
		while let Some(segment) = self.expires.peek() {
			if segment.expires > now {
				break;
			}

			// Update the entry to None while preserving the index.
			match self.lookup.entry(segment.sequence) {
				indexmap::map::Entry::Occupied(mut entry) => entry.insert(None),
				indexmap::map::Entry::Vacant(_) => panic!("expired segment not found"),
			};

			self.expires.pop();
		}

		// Remove None entries from the start of the lookup.
		while let Some((_, None)) = self.lookup.get_index(0) {
			self.lookup.shift_remove_index(0);
			self.pruned += 1;
		}
	}
}

impl Default for State {
	fn default() -> Self {
		Self {
			lookup: Default::default(),
			expires: Default::default(),
			pruned: 0,
			closed: Ok(()),
		}
	}
}

impl fmt::Debug for State {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("State")
			.field("lookup", &self.lookup)
			.field("pruned", &self.pruned)
			.field("closed", &self.closed)
			.finish()
	}
}

/// Creates new segments for a track.
pub struct Publisher {
	state: Watch<State>,
	info: Arc<Info>,
	_dropped: Arc<Dropped>,
}

impl Publisher {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));
		Self { state, info, _dropped }
	}

	/// Insert a new segment.
	pub fn insert_segment(&mut self, segment: segment::Subscriber) -> Result<(), CacheError> {
		self.state.lock_mut().insert(segment)
	}

	/// Create and insert a segment with the given info.
	pub fn create_segment(&mut self, info: segment::Info) -> Result<segment::Publisher, CacheError> {
		let (publisher, subscriber) = segment::new(info);
		self.insert_segment(subscriber)?;
		Ok(publisher)
	}

	/// Close the track with an error.
	pub fn close(self, err: CacheError) -> Result<(), CacheError> {
		self.state.lock_mut().close(err)
	}
}

impl Deref for Publisher {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Publisher {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Publisher")
			.field("state", &self.state)
			.field("info", &self.info)
			.finish()
	}
}

/// Receives new segments for a track.
#[derive(Clone)]
pub struct Subscriber {
	state: Watch<State>,
	info: Arc<Info>,

	// The index of the next segment to return.
	index: usize,

	// If there are multiple segments to return, we put them in here to return them in priority order.
	pending: BinaryHeap<SegmentPriority>,

	// Dropped when all subscribers are dropped.
	_dropped: Arc<Dropped>,
}

impl Subscriber {
	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));
		Self {
			state,
			info,
			index: 0,
			pending: Default::default(),
			_dropped,
		}
	}

	/// Block until the next segment arrives
	pub async fn segment(&mut self) -> Result<Option<segment::Subscriber>, CacheError> {
		loop {
			let notify = {
				let state = self.state.lock();

				// Get our adjusted index, which could be negative if we've removed more segments than we've read.
				let mut index = self.index.saturating_sub(state.pruned);

				// Push all new segments into a priority queue.
				while index < state.lookup.len() {
					let (_, segment) = state.lookup.get_index(index).unwrap();

					// Skip None values (expired segments).
					// TODO These might actually be expired, so we should check the expiration time.
					if let Some(segment) = segment {
						self.pending.push(SegmentPriority(segment.clone()));
					}

					index += 1;
				}

				self.index = state.pruned + index;

				// Return the higher priority segment.
				if let Some(segment) = self.pending.pop() {
					return Ok(Some(segment.0));
				}

				// Otherwise check if we need to return an error.
				match &state.closed {
					Err(CacheError::Closed) => return Ok(None),
					Err(err) => return Err(err.clone()),
					Ok(()) => state.changed(),
				}
			};

			notify.await
		}
	}
}

impl Deref for Subscriber {
	type Target = Info;

	fn deref(&self) -> &Self::Target {
		&self.info
	}
}

impl fmt::Debug for Subscriber {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("Subscriber")
			.field("state", &self.state)
			.field("info", &self.info)
			.field("index", &self.index)
			.finish()
	}
}

// Closes the track on Drop.
struct Dropped {
	state: Watch<State>,
}

impl Dropped {
	fn new(state: Watch<State>) -> Self {
		Self { state }
	}
}

impl Drop for Dropped {
	fn drop(&mut self) {
		self.state.lock_mut().close(CacheError::Closed).ok();
	}
}

// Used to order segments by expiration time.
struct SegmentExpiration {
	sequence: VarInt,
	expires: time::Instant,
}

impl Ord for SegmentExpiration {
	fn cmp(&self, other: &Self) -> std::cmp::Ordering {
		// Reverse order so the earliest expiration is at the top of the heap.
		other.expires.cmp(&self.expires)
	}
}

impl PartialOrd for SegmentExpiration {
	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl PartialEq for SegmentExpiration {
	fn eq(&self, other: &Self) -> bool {
		self.expires == other.expires
	}
}

impl Eq for SegmentExpiration {}

// Used to order segments by priority
#[derive(Clone)]
struct SegmentPriority(pub segment::Subscriber);

impl Ord for SegmentPriority {
	fn cmp(&self, other: &Self) -> std::cmp::Ordering {
		// Reverse order so the highest priority is at the top of the heap.
		// TODO I let CodePilot generate this code so yolo
		other.0.priority.cmp(&self.0.priority)
	}
}

impl PartialOrd for SegmentPriority {
	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl PartialEq for SegmentPriority {
	fn eq(&self, other: &Self) -> bool {
		self.0.priority == other.0.priority
	}
}

impl Eq for SegmentPriority {}
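State::insert above enforces unique segment sequence numbers per track. A small sketch of that rule, not from this commit, with placeholder Info values:

use moq_transport::cache::{segment, track, CacheError};
use moq_transport::VarInt;

fn duplicate_sequence() -> Result<(), CacheError> {
	let (mut publisher, _subscriber) = track::new("video");

	let info = || segment::Info {
		sequence: VarInt::from_u32(1),
		priority: 0,
		expires: None,
	};

	publisher.create_segment(info())?;

	// A second segment with the same sequence number is rejected.
	assert!(matches!(publisher.create_segment(info()), Err(CacheError::Duplicate)));

	Ok(())
}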
moq-transport/src/cache/watch.rs (vendored, new file, 180 lines)
@@ -0,0 +1,180 @@
use std::{
	fmt,
	future::Future,
	ops::{Deref, DerefMut},
	pin::Pin,
	sync::{Arc, Mutex, MutexGuard},
	task,
};

struct State<T> {
	value: T,
	wakers: Vec<task::Waker>,
	epoch: usize,
}

impl<T> State<T> {
	pub fn new(value: T) -> Self {
		Self {
			value,
			wakers: Vec::new(),
			epoch: 0,
		}
	}

	pub fn register(&mut self, waker: &task::Waker) {
		self.wakers.retain(|existing| !existing.will_wake(waker));
		self.wakers.push(waker.clone());
	}

	pub fn notify(&mut self) {
		self.epoch += 1;
		for waker in self.wakers.drain(..) {
			waker.wake();
		}
	}
}

impl<T: Default> Default for State<T> {
	fn default() -> Self {
		Self::new(T::default())
	}
}

impl<T: fmt::Debug> fmt::Debug for State<T> {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		self.value.fmt(f)
	}
}

pub struct Watch<T> {
	state: Arc<Mutex<State<T>>>,
}

impl<T> Watch<T> {
	pub fn new(initial: T) -> Self {
		let state = Arc::new(Mutex::new(State::new(initial)));
		Self { state }
	}

	pub fn lock(&self) -> WatchRef<T> {
		WatchRef {
			state: self.state.clone(),
			lock: self.state.lock().unwrap(),
		}
	}

	pub fn lock_mut(&self) -> WatchMut<T> {
		WatchMut {
			lock: self.state.lock().unwrap(),
		}
	}
}

impl<T> Clone for Watch<T> {
	fn clone(&self) -> Self {
		Self {
			state: self.state.clone(),
		}
	}
}

impl<T: Default> Default for Watch<T> {
	fn default() -> Self {
		Self::new(T::default())
	}
}

impl<T: fmt::Debug> fmt::Debug for Watch<T> {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		match self.state.try_lock() {
			Ok(lock) => lock.value.fmt(f),
			Err(_) => write!(f, "<locked>"),
		}
	}
}

pub struct WatchRef<'a, T> {
	state: Arc<Mutex<State<T>>>,
	lock: MutexGuard<'a, State<T>>,
}

impl<'a, T> WatchRef<'a, T> {
	// Release the lock and wait for a notification when next updated.
	pub fn changed(self) -> WatchChanged<T> {
		WatchChanged {
			state: self.state,
			epoch: self.lock.epoch,
		}
	}

	// Upgrade to a mutable reference that automatically calls notify on drop.
	pub fn into_mut(self) -> WatchMut<'a, T> {
		WatchMut { lock: self.lock }
	}
}

impl<'a, T> Deref for WatchRef<'a, T> {
	type Target = T;

	fn deref(&self) -> &Self::Target {
		&self.lock.value
	}
}

impl<'a, T: fmt::Debug> fmt::Debug for WatchRef<'a, T> {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		self.lock.fmt(f)
	}
}

pub struct WatchMut<'a, T> {
	lock: MutexGuard<'a, State<T>>,
}

impl<'a, T> Deref for WatchMut<'a, T> {
	type Target = T;

	fn deref(&self) -> &Self::Target {
		&self.lock.value
	}
}

impl<'a, T> DerefMut for WatchMut<'a, T> {
	fn deref_mut(&mut self) -> &mut Self::Target {
		&mut self.lock.value
	}
}

impl<'a, T> Drop for WatchMut<'a, T> {
	fn drop(&mut self) {
		self.lock.notify();
	}
}

impl<'a, T: fmt::Debug> fmt::Debug for WatchMut<'a, T> {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		self.lock.fmt(f)
	}
}

pub struct WatchChanged<T> {
	state: Arc<Mutex<State<T>>>,
	epoch: usize,
}

impl<T> Future for WatchChanged<T> {
	type Output = ();

	fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll<Self::Output> {
		// TODO is there an API we can make that doesn't drop this lock?
		let mut state = self.state.lock().unwrap();

		if state.epoch > self.epoch {
			task::Poll::Ready(())
		} else {
			state.register(cx.waker());
			task::Poll::Pending
		}
	}
}
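Watch is the primitive every handle above is built on, so the lock/changed pattern it enables is worth spelling out. This is a crate-internal sketch, not part of the commit (Watch is pub(crate), so it is not reachable from outside moq-transport); it mirrors how broadcast, track, segment and fragment use it:

// Assumed to live inside the crate, next to the cache module.
async fn wait_for_nonzero(watch: &Watch<u64>) -> u64 {
	loop {
		// Check the condition and grab the `changed()` future *before*
		// releasing the lock, so a concurrent notify cannot be missed.
		let notify = {
			let state = watch.lock();
			if *state > 0 {
				return *state;
			}
			state.changed()
		};

		notify.await;
	}
}

fn bump(watch: &Watch<u64>) {
	// Dropping the WatchMut guard bumps the epoch and wakes every waiter.
	*watch.lock_mut() += 1;
}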
moq-transport/src/coding/decode.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
use super::{BoundsExceeded, VarInt};
use std::{io, str};

use thiserror::Error;

// I'm too lazy to add these trait bounds to every message type.
// TODO Use trait aliases when they're stable, or add these bounds to every method.
pub trait AsyncRead: tokio::io::AsyncRead + Unpin + Send {}
impl AsyncRead for webtransport_quinn::RecvStream {}
impl<T> AsyncRead for tokio::io::Take<&mut T> where T: AsyncRead {}
impl<T: AsRef<[u8]> + Unpin + Send> AsyncRead for io::Cursor<T> {}

#[async_trait::async_trait]
pub trait Decode: Sized {
	async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError>;
}

/// A decode error.
#[derive(Error, Debug)]
pub enum DecodeError {
	#[error("unexpected end of buffer")]
	UnexpectedEnd,

	#[error("invalid string")]
	InvalidString(#[from] str::Utf8Error),

	#[error("invalid message: {0:?}")]
	InvalidMessage(VarInt),

	#[error("invalid role: {0:?}")]
	InvalidRole(VarInt),

	#[error("invalid subscribe location")]
	InvalidSubscribeLocation,

	#[error("varint bounds exceeded")]
	BoundsExceeded(#[from] BoundsExceeded),

	// TODO move these to ParamError
	#[error("duplicate parameter")]
	DupliateParameter,

	#[error("missing parameter")]
	MissingParameter,

	#[error("invalid parameter")]
	InvalidParameter,

	#[error("io error: {0}")]
	IoError(#[from] std::io::Error),

	// Used to signal that the stream has ended.
	#[error("no more messages")]
	Final,
}
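To show how the Decode trait above composes, here is a hypothetical crate-internal message type (Window is invented for illustration; it is not part of this commit). Fields are simply decoded in order, the same way Params and String do it; the Encode half in the next file is symmetric:

use crate::coding::{AsyncRead, Decode, DecodeError, VarInt};

pub struct Window {
	pub start: VarInt,
	pub count: VarInt,
}

#[async_trait::async_trait]
impl Decode for Window {
	async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		// Read fields in declaration order; short reads surface as errors.
		let start = VarInt::decode(r).await?;
		let count = VarInt::decode(r).await?;
		Ok(Self { start, count })
	}
}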
moq-transport/src/coding/encode.rs (new file, 27 lines)
@@ -0,0 +1,27 @@
use super::BoundsExceeded;

use thiserror::Error;

// I'm too lazy to add these trait bounds to every message type.
// TODO Use trait aliases when they're stable, or add these bounds to every method.
pub trait AsyncWrite: tokio::io::AsyncWrite + Unpin + Send {}
impl AsyncWrite for webtransport_quinn::SendStream {}
impl AsyncWrite for Vec<u8> {}

#[async_trait::async_trait]
pub trait Encode: Sized {
	async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError>;
}

/// An encode error.
#[derive(Error, Debug)]
pub enum EncodeError {
	#[error("varint too large")]
	BoundsExceeded(#[from] BoundsExceeded),

	#[error("invalid value")]
	InvalidValue,

	#[error("i/o error: {0}")]
	IoError(#[from] std::io::Error),
}
moq-transport/src/coding/mod.rs (new file, 11 lines)
@@ -0,0 +1,11 @@
mod decode;
mod encode;
mod params;
mod string;
mod varint;

pub use decode::*;
pub use encode::*;
pub use params::*;
pub use string::*;
pub use varint::*;
moq-transport/src/coding/params.rs (new file, 85 lines)
@@ -0,0 +1,85 @@
use std::io::Cursor;
use std::{cmp::max, collections::HashMap};

use tokio::io::{AsyncReadExt, AsyncWriteExt};

use crate::coding::{AsyncRead, AsyncWrite, Decode, Encode};

use crate::{
	coding::{DecodeError, EncodeError},
	VarInt,
};

#[derive(Default, Debug, Clone)]
pub struct Params(pub HashMap<VarInt, Vec<u8>>);

#[async_trait::async_trait]
impl Decode for Params {
	async fn decode<R: AsyncRead>(mut r: &mut R) -> Result<Self, DecodeError> {
		let mut params = HashMap::new();

		// I hate this shit so much; let me encode my role and get on with my life.
		let count = VarInt::decode(r).await?;
		for _ in 0..count.into_inner() {
			let kind = VarInt::decode(r).await?;
			if params.contains_key(&kind) {
				return Err(DecodeError::DupliateParameter);
			}

			let size = VarInt::decode(r).await?;

			// Don't allocate the entire requested size to avoid a possible attack
			// Instead, we allocate up to 1024 and keep appending as we read further.
			let mut pr = r.take(size.into_inner());
			let mut buf = Vec::with_capacity(max(1024, pr.limit() as usize));
			pr.read_to_end(&mut buf).await?;
			params.insert(kind, buf);

			r = pr.into_inner();
		}

		Ok(Params(params))
	}
}

#[async_trait::async_trait]
impl Encode for Params {
	async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		VarInt::try_from(self.0.len())?.encode(w).await?;

		for (kind, value) in self.0.iter() {
			kind.encode(w).await?;
			VarInt::try_from(value.len())?.encode(w).await?;
			w.write_all(value).await?;
		}

		Ok(())
	}
}

impl Params {
	pub fn new() -> Self {
		Self::default()
	}

	pub async fn set<P: Encode>(&mut self, kind: VarInt, p: P) -> Result<(), EncodeError> {
		let mut value = Vec::new();
		p.encode(&mut value).await?;
		self.0.insert(kind, value);

		Ok(())
	}

	pub fn has(&self, kind: VarInt) -> bool {
		self.0.contains_key(&kind)
	}

	pub async fn get<P: Decode>(&mut self, kind: VarInt) -> Result<Option<P>, DecodeError> {
		if let Some(value) = self.0.remove(&kind) {
			let mut cursor = Cursor::new(value);
			Ok(Some(P::decode(&mut cursor).await?))
		} else {
			Ok(None)
		}
	}
}
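A crate-internal sketch of the Params round trip (not part of this commit; the parameter id 0 and the VarInt value 42 are arbitrary):

use crate::coding::{DecodeError, Params, VarInt};

async fn roundtrip() -> Result<(), DecodeError> {
	let mut params = Params::new();

	// `set` encodes the value into a byte buffer keyed by the parameter id.
	params.set(VarInt::from_u32(0), VarInt::from_u32(42)).await.unwrap();
	assert!(params.has(VarInt::from_u32(0)));

	// `get` removes and decodes the value, returning Ok(None) if it was absent.
	let value: Option<VarInt> = params.get(VarInt::from_u32(0)).await?;
	assert_eq!(value.map(u64::from), Some(42));

	Ok(())
}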
moq-transport/src/coding/string.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
use std::cmp::min;

use crate::coding::{AsyncRead, AsyncWrite};
use tokio::io::{AsyncReadExt, AsyncWriteExt};

use crate::VarInt;

use super::{Decode, DecodeError, Encode, EncodeError};

#[async_trait::async_trait]
impl Encode for String {
	async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		let size = VarInt::try_from(self.len())?;
		size.encode(w).await?;
		w.write_all(self.as_ref()).await?;
		Ok(())
	}
}

#[async_trait::async_trait]
impl Decode for String {
	/// Decode a string with a varint length prefix.
	async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let size = VarInt::decode(r).await?.into_inner();
		let mut str = String::with_capacity(min(1024, size) as usize);
		r.take(size).read_to_string(&mut str).await?;
		Ok(str)
	}
}
moq-transport/src/coding/varint.rs (new file, 232 lines)
@@ -0,0 +1,232 @@
// Based on quinn-proto
// https://github.com/quinn-rs/quinn/blob/main/quinn-proto/src/varint.rs
// Licensed via Apache 2.0 and MIT

use std::convert::{TryFrom, TryInto};
use std::fmt;

use crate::coding::{AsyncRead, AsyncWrite};
use thiserror::Error;
use tokio::io::{AsyncReadExt, AsyncWriteExt};

use super::{Decode, DecodeError, Encode, EncodeError};

#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)]
#[error("value out of range")]
pub struct BoundsExceeded;

/// An integer less than 2^62
///
/// Values of this type are suitable for encoding as QUIC variable-length integer.
// It would be neat if we could express to Rust that the top two bits are available for use as enum
// discriminants
#[derive(Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VarInt(u64);

impl VarInt {
	/// The largest possible value.
	pub const MAX: Self = Self((1 << 62) - 1);

	/// The smallest possible value.
	pub const ZERO: Self = Self(0);

	/// Construct a `VarInt` infallibly using the largest available type.
	/// Larger values need to use `try_from` instead.
	pub const fn from_u32(x: u32) -> Self {
		Self(x as u64)
	}

	/// Extract the integer value
	pub const fn into_inner(self) -> u64 {
		self.0
	}
}

impl From<VarInt> for u64 {
	fn from(x: VarInt) -> Self {
		x.0
	}
}

impl From<VarInt> for usize {
	fn from(x: VarInt) -> Self {
		x.0 as usize
	}
}

impl From<VarInt> for u128 {
	fn from(x: VarInt) -> Self {
		x.0 as u128
	}
}

impl From<u8> for VarInt {
	fn from(x: u8) -> Self {
		Self(x.into())
	}
}

impl From<u16> for VarInt {
	fn from(x: u16) -> Self {
		Self(x.into())
	}
}

impl From<u32> for VarInt {
	fn from(x: u32) -> Self {
		Self(x.into())
	}
}

impl TryFrom<u64> for VarInt {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^62
	fn try_from(x: u64) -> Result<Self, BoundsExceeded> {
		if x <= Self::MAX.into_inner() {
			Ok(Self(x))
		} else {
			Err(BoundsExceeded)
		}
	}
}

impl TryFrom<u128> for VarInt {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^62
	fn try_from(x: u128) -> Result<Self, BoundsExceeded> {
		if x <= Self::MAX.into() {
			Ok(Self(x as u64))
		} else {
			Err(BoundsExceeded)
		}
	}
}

impl TryFrom<usize> for VarInt {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^62
	fn try_from(x: usize) -> Result<Self, BoundsExceeded> {
		Self::try_from(x as u64)
	}
}

impl TryFrom<VarInt> for u32 {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^32
	fn try_from(x: VarInt) -> Result<Self, BoundsExceeded> {
		if x.0 <= u32::MAX.into() {
			Ok(x.0 as u32)
		} else {
			Err(BoundsExceeded)
		}
	}
}

impl TryFrom<VarInt> for u16 {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^16
	fn try_from(x: VarInt) -> Result<Self, BoundsExceeded> {
		if x.0 <= u16::MAX.into() {
			Ok(x.0 as u16)
		} else {
			Err(BoundsExceeded)
		}
	}
}

impl TryFrom<VarInt> for u8 {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^8
	fn try_from(x: VarInt) -> Result<Self, BoundsExceeded> {
		if x.0 <= u8::MAX.into() {
			Ok(x.0 as u8)
		} else {
			Err(BoundsExceeded)
		}
	}
}

impl fmt::Debug for VarInt {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		self.0.fmt(f)
	}
}

impl fmt::Display for VarInt {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		self.0.fmt(f)
	}
}

#[async_trait::async_trait]
impl Decode for VarInt {
	/// Decode a varint from the given reader.
	async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let b = r.read_u8().await?;
		Self::decode_byte(b, r).await
	}
}

impl VarInt {
	/// Decode a varint given the first byte, reading the rest as needed.
	/// This is silly but useful for determining if the stream has ended.
	pub async fn decode_byte<R: AsyncRead>(b: u8, r: &mut R) -> Result<Self, DecodeError> {
		let tag = b >> 6;

		let mut buf = [0u8; 8];
		buf[0] = b & 0b0011_1111;

		let x = match tag {
			0b00 => u64::from(buf[0]),
			0b01 => {
				r.read_exact(buf[1..2].as_mut()).await?;
				u64::from(u16::from_be_bytes(buf[..2].try_into().unwrap()))
			}
			0b10 => {
				r.read_exact(buf[1..4].as_mut()).await?;
				u64::from(u32::from_be_bytes(buf[..4].try_into().unwrap()))
			}
			0b11 => {
				r.read_exact(buf[1..8].as_mut()).await?;
				u64::from_be_bytes(buf)
			}
			_ => unreachable!(),
		};

		Ok(Self(x))
	}
}

#[async_trait::async_trait]
impl Encode for VarInt {
	/// Encode a varint to the given writer.
	async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		let x = self.0;
		if x < 2u64.pow(6) {
			w.write_u8(x as u8).await?;
		} else if x < 2u64.pow(14) {
			w.write_u16(0b01 << 14 | x as u16).await?;
		} else if x < 2u64.pow(30) {
			w.write_u32(0b10 << 30 | x as u32).await?;
		} else if x < 2u64.pow(62) {
			w.write_u64(0b11 << 62 | x).await?;
		} else {
			unreachable!("malformed VarInt");
		}

		Ok(())
	}
}

// This is a fork of quinn::VarInt.
impl From<quinn::VarInt> for VarInt {
	fn from(v: quinn::VarInt) -> Self {
		Self(v.into_inner())
	}
}
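The 1/2/4/8-byte wire format above can be checked with a small crate-internal round trip (not part of this commit); Vec<u8> works as the writer because of the AsyncWrite impl in encode.rs, and io::Cursor as the reader because of the AsyncRead impl in decode.rs:

use std::io::Cursor;

use crate::coding::{Decode, Encode, EncodeError, VarInt};

async fn wire_sizes() -> Result<(), EncodeError> {
	// Values below 2^6 fit in one byte; the prefix then grows to 2, 4 and 8 bytes.
	for (value, expected_len) in [(63u32, 1usize), (64, 2), (16_384, 4), (1_073_741_824, 8)] {
		let v = VarInt::from_u32(value);

		let mut buf = Vec::new();
		v.encode(&mut buf).await?;
		assert_eq!(buf.len(), expected_len);

		// Decoding from a cursor yields the original value.
		let decoded = VarInt::decode(&mut Cursor::new(buf)).await.unwrap();
		assert_eq!(decoded, v);
	}

	Ok(())
}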
moq-transport/src/error.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
pub trait MoqError {
	/// An integer code that is sent over the wire.
	fn code(&self) -> u32;

	/// An optional reason sometimes sent over the wire.
	fn reason(&self) -> String;
}
moq-transport/src/lib.rs (new file, 18 lines)
@@ -0,0 +1,18 @@
//! An implementation of the MoQ Transport protocol.
//!
//! MoQ Transport is a pub/sub protocol over QUIC.
//! While originally designed for live media, MoQ Transport is generic and can be used for other live applications.
//! The specification is a work in progress and will change.
//! See the [specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/) and [github](https://github.com/moq-wg/moq-transport) for any updates.
//!
//! This implementation has some required extensions until the draft stabilizes. See: [Extensions](crate::setup::Extensions)
mod coding;
mod error;

pub mod cache;
pub mod message;
pub mod session;
pub mod setup;

pub use coding::VarInt;
pub use error::MoqError;
30
moq-transport/src/message/announce.rs
Normal file
@@ -0,0 +1,30 @@
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params};
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the publisher to announce the availability of a group of tracks.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Announce {
|
||||
/// The track namespace
|
||||
pub namespace: String,
|
||||
|
||||
/// Optional parameters
|
||||
pub params: Params,
|
||||
}
|
||||
|
||||
impl Announce {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let namespace = String::decode(r).await?;
|
||||
let params = Params::decode(r).await?;
|
||||
|
||||
Ok(Self { namespace, params })
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.namespace.encode(w).await?;
|
||||
self.params.encode(w).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
23
moq-transport/src/message/announce_ok.rs
Normal file
@@ -0,0 +1,23 @@
|
||||
use crate::{
|
||||
coding::{AsyncRead, AsyncWrite, Decode, DecodeError, Encode, EncodeError},
|
||||
setup::Extensions,
|
||||
};
|
||||
|
||||
/// Sent by the subscriber to accept an Announce.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AnnounceOk {
|
||||
// Echo back the namespace that was announced.
|
||||
// TODO Propose using an ID to save bytes.
|
||||
pub namespace: String,
|
||||
}
|
||||
|
||||
impl AnnounceOk {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let namespace = String::decode(r).await?;
|
||||
Ok(Self { namespace })
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.namespace.encode(w).await
|
||||
}
|
||||
}
|
||||
39
moq-transport/src/message/announce_reset.rs
Normal file
@@ -0,0 +1,39 @@
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the subscriber to reject an Announce.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AnnounceError {
|
||||
// Echo back the namespace that was reset
|
||||
pub namespace: String,
|
||||
|
||||
// An error code.
|
||||
pub code: u32,
|
||||
|
||||
// An optional, human-readable reason.
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
impl AnnounceError {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let namespace = String::decode(r).await?;
|
||||
let code = VarInt::decode(r).await?.try_into()?;
|
||||
let reason = String::decode(r).await?;
|
||||
|
||||
Ok(Self {
|
||||
namespace,
|
||||
code,
|
||||
reason,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.namespace.encode(w).await?;
|
||||
VarInt::from_u32(self.code).encode(w).await?;
|
||||
self.reason.encode(w).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
21
moq-transport/src/message/go_away.rs
Normal file
@@ -0,0 +1,21 @@
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError};
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the server to indicate that the client should connect to a different server.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GoAway {
|
||||
pub url: String,
|
||||
}
|
||||
|
||||
impl GoAway {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let url = String::decode(r).await?;
|
||||
Ok(Self { url })
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.url.encode(w).await
|
||||
}
|
||||
}
|
||||
160
moq-transport/src/message/mod.rs
Normal file
@@ -0,0 +1,160 @@
|
||||
//! Low-level message sent over the wire, as defined in the specification.
|
||||
//!
|
||||
//! All of these messages are sent over a bidirectional QUIC stream.
|
||||
//! This introduces some head-of-line blocking but preserves ordering.
|
||||
//! The only exception is OBJECT "messages", which are sent over dedicated QUIC streams.
|
||||
//!
|
||||
//! Messages sent by the publisher:
|
||||
//! - [Announce]
|
||||
//! - [Unannounce]
|
||||
//! - [SubscribeOk]
|
||||
//! - [SubscribeError]
|
||||
//! - [SubscribeReset]
|
||||
//! - [Object]
|
||||
//!
|
||||
//! Messages sent by the subscriber:
|
||||
//! - [Subscribe]
|
||||
//! - [Unsubscribe]
|
||||
//! - [AnnounceOk]
|
||||
//! - [AnnounceError]
|
||||
//!
|
||||
//! Example flow:
|
||||
//! ```test
|
||||
//! -> ANNOUNCE namespace="foo"
|
||||
//! <- ANNOUNCE_OK namespace="foo"
|
||||
//! <- SUBSCRIBE id=0 namespace="foo" name="bar"
|
||||
//! -> SUBSCRIBE_OK id=0
|
||||
//! -> OBJECT id=0 sequence=69 priority=4 expires=30
|
||||
//! -> OBJECT id=0 sequence=70 priority=4 expires=30
|
||||
//! -> OBJECT id=0 sequence=70 priority=4 expires=30
|
||||
//! <- SUBSCRIBE_STOP id=0
|
||||
//! -> SUBSCRIBE_RESET id=0 code=206 reason="closed by peer"
|
||||
//! ```
|
||||
mod announce;
|
||||
mod announce_ok;
|
||||
mod announce_reset;
|
||||
mod go_away;
|
||||
mod object;
|
||||
mod subscribe;
|
||||
mod subscribe_error;
|
||||
mod subscribe_fin;
|
||||
mod subscribe_ok;
|
||||
mod subscribe_reset;
|
||||
mod unannounce;
|
||||
mod unsubscribe;
|
||||
|
||||
pub use announce::*;
|
||||
pub use announce_ok::*;
|
||||
pub use announce_reset::*;
|
||||
pub use go_away::*;
|
||||
pub use object::*;
|
||||
pub use subscribe::*;
|
||||
pub use subscribe_error::*;
|
||||
pub use subscribe_fin::*;
|
||||
pub use subscribe_ok::*;
|
||||
pub use subscribe_reset::*;
|
||||
pub use unannounce::*;
|
||||
pub use unsubscribe::*;
|
||||
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
// Use a macro to generate the message types rather than copy-paste.
|
||||
// This implements a decode/encode method that uses the specified type.
|
||||
macro_rules! message_types {
|
||||
{$($name:ident = $val:expr,)*} => {
|
||||
/// All supported message types.
|
||||
#[derive(Clone)]
|
||||
pub enum Message {
|
||||
$($name($name)),*
|
||||
}
|
||||
|
||||
impl Message {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let t = VarInt::decode(r).await?;
|
||||
|
||||
match t.into_inner() {
|
||||
$($val => {
|
||||
let msg = $name::decode(r, ext).await?;
|
||||
Ok(Self::$name(msg))
|
||||
})*
|
||||
_ => Err(DecodeError::InvalidMessage(t)),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, ext: &Extensions) -> Result<(), EncodeError> {
|
||||
match self {
|
||||
$(Self::$name(ref m) => {
|
||||
VarInt::from_u32($val).encode(w).await?;
|
||||
m.encode(w, ext).await
|
||||
},)*
|
||||
}
|
||||
}
|
||||
|
||||
pub fn id(&self) -> VarInt {
|
||||
match self {
|
||||
$(Self::$name(_) => {
|
||||
VarInt::from_u32($val)
|
||||
},)*
|
||||
}
|
||||
}
|
||||
|
||||
pub fn name(&self) -> &'static str {
|
||||
match self {
|
||||
$(Self::$name(_) => {
|
||||
stringify!($name)
|
||||
},)*
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
$(impl From<$name> for Message {
|
||||
fn from(m: $name) -> Self {
|
||||
Message::$name(m)
|
||||
}
|
||||
})*
|
||||
|
||||
impl fmt::Debug for Message {
|
||||
// Delegate to the message formatter
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
$(Self::$name(ref m) => m.fmt(f),)*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Each message is prefixed with the given VarInt type.
|
||||
message_types! {
|
||||
// NOTE: Object and Setup are in other modules.
|
||||
// Object = 0x0
|
||||
// ObjectUnbounded = 0x2
|
||||
// SetupClient = 0x40
|
||||
// SetupServer = 0x41
|
||||
|
||||
// SUBSCRIBE family, sent by subscriber
|
||||
Subscribe = 0x3,
|
||||
Unsubscribe = 0xa,
|
||||
|
||||
// SUBSCRIBE family, sent by publisher
|
||||
SubscribeOk = 0x4,
|
||||
SubscribeError = 0x5,
|
||||
SubscribeFin = 0xb,
|
||||
SubscribeReset = 0xc,
|
||||
|
||||
// ANNOUNCE family, sent by publisher
|
||||
Announce = 0x6,
|
||||
Unannounce = 0x9,
|
||||
|
||||
// ANNOUNCE family, sent by subscriber
|
||||
AnnounceOk = 0x7,
|
||||
AnnounceError = 0x8,
|
||||
|
||||
// Misc
|
||||
GoAway = 0x10,
|
||||
}
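// Not part of the original commit: a small sketch of how the generated enum is
// used. A concrete message converts into `Message` via the generated `From`
// impl, and encode/decode round-trips it with the negotiated extensions. It
// assumes tokio's in-memory AsyncRead/AsyncWrite impls satisfy the coding aliases.
#[cfg(test)]
mod message_roundtrip {
	use super::*;
	use crate::setup::Extensions;

	#[tokio::test]
	async fn goaway() {
		let ext = Extensions::default();

		// Wrap a concrete message in the enum.
		let msg: Message = GoAway { url: "https://relay.example".to_string() }.into();
		assert_eq!(msg.name(), "GoAway");

		// The message is encoded as its VarInt type prefix (0x10 for GOAWAY) followed by the payload.
		let mut buf = Vec::new();
		msg.encode(&mut buf, &ext).await.unwrap();
		assert_eq!(buf[0], 0x10);

		// Decoding restores the same variant.
		let mut read = buf.as_slice();
		let decoded = Message::decode(&mut read, &ext).await.unwrap();
		assert_eq!(decoded.id(), msg.id());
	}
}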
|
||||
108
moq-transport/src/message/object.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
use std::{io, time};
|
||||
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
use crate::setup;
|
||||
|
||||
/// Sent by the publisher as the header of each data stream.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Object {
|
||||
// An ID for this track.
|
||||
// Proposal: https://github.com/moq-wg/moq-transport/issues/209
|
||||
pub track: VarInt,
|
||||
|
||||
// The sequence number within the track.
|
||||
pub group: VarInt,
|
||||
|
||||
// The sequence number within the group.
|
||||
pub sequence: VarInt,
|
||||
|
||||
// The priority, where **smaller** values are sent first.
|
||||
pub priority: u32,
|
||||
|
||||
// Cache the object for at most this many seconds.
|
||||
// Zero means never expire.
|
||||
pub expires: Option<time::Duration>,
|
||||
|
||||
/// An optional size, allowing multiple OBJECTs on the same stream.
|
||||
pub size: Option<VarInt>,
|
||||
}
|
||||
|
||||
impl Object {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, extensions: &setup::Extensions) -> Result<Self, DecodeError> {
|
||||
// Try reading the first byte, returning a special error if the stream naturally ended.
|
||||
let typ = match r.read_u8().await {
|
||||
Ok(b) => VarInt::decode_byte(b, r).await?,
|
||||
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return Err(DecodeError::Final),
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
|
||||
let size_present = match typ.into_inner() {
|
||||
0 => false,
|
||||
2 => true,
|
||||
_ => return Err(DecodeError::InvalidMessage(typ)),
|
||||
};
|
||||
|
||||
let track = VarInt::decode(r).await?;
|
||||
let group = VarInt::decode(r).await?;
|
||||
let sequence = VarInt::decode(r).await?;
|
||||
let priority = VarInt::decode(r).await?.try_into()?;
|
||||
|
||||
let expires = match extensions.object_expires {
|
||||
true => match VarInt::decode(r).await?.into_inner() {
|
||||
0 => None,
|
||||
secs => Some(time::Duration::from_secs(secs)),
|
||||
},
|
||||
false => None,
|
||||
};
|
||||
|
||||
// The presence of the size field depends on the type.
|
||||
let size = match size_present {
|
||||
true => Some(VarInt::decode(r).await?),
|
||||
false => None,
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
track,
|
||||
group,
|
||||
sequence,
|
||||
priority,
|
||||
expires,
|
||||
size,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, extensions: &setup::Extensions) -> Result<(), EncodeError> {
|
||||
// The kind changes based on the presence of the size.
|
||||
let kind = match self.size {
|
||||
Some(_) => VarInt::from_u32(2),
|
||||
None => VarInt::ZERO,
|
||||
};
|
||||
|
||||
kind.encode(w).await?;
|
||||
self.track.encode(w).await?;
|
||||
self.group.encode(w).await?;
|
||||
self.sequence.encode(w).await?;
|
||||
VarInt::from_u32(self.priority).encode(w).await?;
|
||||
|
||||
// Round up if there are any fractional seconds.
|
||||
let expires = match self.expires {
|
||||
None => 0,
|
||||
Some(time::Duration::ZERO) => return Err(EncodeError::InvalidValue), // there's no way of expressing zero currently.
|
||||
Some(expires) if expires.subsec_nanos() > 0 => expires.as_secs() + 1,
|
||||
Some(expires) => expires.as_secs(),
|
||||
};
|
||||
|
||||
if extensions.object_expires {
|
||||
VarInt::try_from(expires)?.encode(w).await?;
|
||||
}
|
||||
|
||||
if let Some(size) = self.size {
|
||||
size.encode(w).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
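// Not part of the original commit: a round-trip sketch for the OBJECT header.
// It exercises the extension-dependent fields (`expires` is only on the wire
// when object_expires is negotiated; `size` switches the type between 0x0 and
// 0x2) and assumes tokio's in-memory AsyncRead/AsyncWrite impls satisfy the
// coding aliases.
#[cfg(test)]
mod object_roundtrip {
	use super::*;
	use crate::{setup, VarInt};
	use std::time;

	#[tokio::test]
	async fn header() {
		let ext = setup::Extensions {
			object_expires: true,
			subscriber_id: true,
			subscribe_split: true,
		};

		let object = Object {
			track: VarInt::from_u32(1),
			group: VarInt::from_u32(2),
			sequence: VarInt::from_u32(3),
			priority: 4,
			expires: Some(time::Duration::from_secs(30)),
			size: Some(VarInt::from_u32(1024)),
		};

		let mut buf = Vec::new();
		object.encode(&mut buf, &ext).await.unwrap();

		// A size is present, so the type prefix must be 0x2.
		assert_eq!(buf[0], 0x2);

		let mut read = buf.as_slice();
		let decoded = Object::decode(&mut read, &ext).await.unwrap();
		assert_eq!(decoded.group, object.group);
		assert_eq!(decoded.expires, object.expires);
		assert_eq!(decoded.size, object.size);
	}
}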
|
||||
142
moq-transport/src/message/subscribe.rs
Normal file
@@ -0,0 +1,142 @@
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params, VarInt};
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the subscriber to request all future objects for the given track.
|
||||
///
|
||||
/// Objects will use the provided ID instead of the full track name, to save bytes.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Subscribe {
|
||||
/// An ID we choose so we can map to the track_name.
|
||||
// Proposal: https://github.com/moq-wg/moq-transport/issues/209
|
||||
pub id: VarInt,
|
||||
|
||||
/// The track namespace.
|
||||
///
|
||||
/// Must be None if `extensions.subscribe_split` is false.
|
||||
pub namespace: Option<String>,
|
||||
|
||||
/// The track name.
|
||||
pub name: String,
|
||||
|
||||
/// The start/end group/object.
|
||||
pub start_group: SubscribeLocation,
|
||||
pub start_object: SubscribeLocation,
|
||||
pub end_group: SubscribeLocation,
|
||||
pub end_object: SubscribeLocation,
|
||||
|
||||
/// Optional parameters
|
||||
pub params: Params,
|
||||
}
|
||||
|
||||
impl Subscribe {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let id = VarInt::decode(r).await?;
|
||||
|
||||
let namespace = match ext.subscribe_split {
|
||||
true => Some(String::decode(r).await?),
|
||||
false => None,
|
||||
};
|
||||
|
||||
let name = String::decode(r).await?;
|
||||
|
||||
let start_group = SubscribeLocation::decode(r).await?;
|
||||
let start_object = SubscribeLocation::decode(r).await?;
|
||||
let end_group = SubscribeLocation::decode(r).await?;
|
||||
let end_object = SubscribeLocation::decode(r).await?;
|
||||
|
||||
// You can't have a start object without a start group.
|
||||
if start_group == SubscribeLocation::None && start_object != SubscribeLocation::None {
|
||||
return Err(DecodeError::InvalidSubscribeLocation);
|
||||
}
|
||||
|
||||
// You can't have an end object without an end group.
|
||||
if end_group == SubscribeLocation::None && end_object != SubscribeLocation::None {
|
||||
return Err(DecodeError::InvalidSubscribeLocation);
|
||||
}
|
||||
|
||||
// NOTE: There's some more location restrictions in the draft, but they're enforced at a higher level.
|
||||
|
||||
let params = Params::decode(r).await?;
|
||||
|
||||
Ok(Self {
|
||||
id,
|
||||
namespace,
|
||||
name,
|
||||
start_group,
|
||||
start_object,
|
||||
end_group,
|
||||
end_object,
|
||||
params,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.id.encode(w).await?;
|
||||
|
||||
if self.namespace.is_some() != ext.subscribe_split {
|
||||
panic!("namespace must be None if subscribe_split is false");
|
||||
}
|
||||
|
||||
if ext.subscribe_split {
|
||||
self.namespace.as_ref().unwrap().encode(w).await?;
|
||||
}
|
||||
|
||||
self.name.encode(w).await?;
|
||||
|
||||
self.start_group.encode(w).await?;
|
||||
self.start_object.encode(w).await?;
|
||||
self.end_group.encode(w).await?;
|
||||
self.end_object.encode(w).await?;
|
||||
|
||||
self.params.encode(w).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Signal where the subscription should begin, relative to the current cache.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum SubscribeLocation {
|
||||
None,
|
||||
Absolute(VarInt),
|
||||
Latest(VarInt),
|
||||
Future(VarInt),
|
||||
}
|
||||
|
||||
impl SubscribeLocation {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
|
||||
let kind = VarInt::decode(r).await?;
|
||||
|
||||
match kind.into_inner() {
|
||||
0 => Ok(Self::None),
|
||||
1 => Ok(Self::Absolute(VarInt::decode(r).await?)),
|
||||
2 => Ok(Self::Latest(VarInt::decode(r).await?)),
|
||||
3 => Ok(Self::Future(VarInt::decode(r).await?)),
|
||||
_ => Err(DecodeError::InvalidSubscribeLocation),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
|
||||
match self {
|
||||
Self::None => {
|
||||
VarInt::from_u32(0).encode(w).await?;
|
||||
}
|
||||
Self::Absolute(val) => {
|
||||
VarInt::from_u32(1).encode(w).await?;
|
||||
val.encode(w).await?;
|
||||
}
|
||||
Self::Latest(val) => {
|
||||
VarInt::from_u32(2).encode(w).await?;
|
||||
val.encode(w).await?;
|
||||
}
|
||||
Self::Future(val) => {
|
||||
VarInt::from_u32(3).encode(w).await?;
|
||||
val.encode(w).await?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
36
moq-transport/src/message/subscribe_error.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the publisher to reject a Subscribe.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SubscribeError {
|
||||
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
|
||||
|
||||
// The ID for this subscription.
|
||||
pub id: VarInt,
|
||||
|
||||
// An error code.
|
||||
pub code: u32,
|
||||
|
||||
// An optional, human-readable reason.
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
impl SubscribeError {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let id = VarInt::decode(r).await?;
|
||||
let code = VarInt::decode(r).await?.try_into()?;
|
||||
let reason = String::decode(r).await?;
|
||||
|
||||
Ok(Self { id, code, reason })
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.id.encode(w).await?;
|
||||
VarInt::from_u32(self.code).encode(w).await?;
|
||||
self.reason.encode(w).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
37
moq-transport/src/message/subscribe_fin.rs
Normal file
@@ -0,0 +1,37 @@
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the publisher to cleanly terminate a Subscribe.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SubscribeFin {
|
||||
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
|
||||
/// The ID for this subscription.
|
||||
pub id: VarInt,
|
||||
|
||||
/// The final group/object sent on this subscription.
|
||||
pub final_group: VarInt,
|
||||
pub final_object: VarInt,
|
||||
}
|
||||
|
||||
impl SubscribeFin {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let id = VarInt::decode(r).await?;
|
||||
let final_group = VarInt::decode(r).await?;
|
||||
let final_object = VarInt::decode(r).await?;
|
||||
|
||||
Ok(Self {
|
||||
id,
|
||||
final_group,
|
||||
final_object,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.id.encode(w).await?;
|
||||
self.final_group.encode(w).await?;
|
||||
self.final_object.encode(w).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
31
moq-transport/src/message/subscribe_ok.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the publisher to accept a Subscribe.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SubscribeOk {
|
||||
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
|
||||
/// The ID for this track.
|
||||
pub id: VarInt,
|
||||
|
||||
/// The subscription will expire in this many milliseconds.
|
||||
pub expires: VarInt,
|
||||
}
|
||||
|
||||
impl SubscribeOk {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let id = VarInt::decode(r).await?;
|
||||
let expires = VarInt::decode(r).await?;
|
||||
Ok(Self { id, expires })
|
||||
}
|
||||
}
|
||||
|
||||
impl SubscribeOk {
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.id.encode(w).await?;
|
||||
self.expires.encode(w).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
50
moq-transport/src/message/subscribe_reset.rs
Normal file
@@ -0,0 +1,50 @@
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the publisher to terminate a Subscribe.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SubscribeReset {
|
||||
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
|
||||
/// The ID for this subscription.
|
||||
pub id: VarInt,
|
||||
|
||||
/// An error code.
|
||||
pub code: u32,
|
||||
|
||||
/// An optional, human-readable reason.
|
||||
pub reason: String,
|
||||
|
||||
/// The final group/object sent on this subscription.
|
||||
pub final_group: VarInt,
|
||||
pub final_object: VarInt,
|
||||
}
|
||||
|
||||
impl SubscribeReset {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let id = VarInt::decode(r).await?;
|
||||
let code = VarInt::decode(r).await?.try_into()?;
|
||||
let reason = String::decode(r).await?;
|
||||
let final_group = VarInt::decode(r).await?;
|
||||
let final_object = VarInt::decode(r).await?;
|
||||
|
||||
Ok(Self {
|
||||
id,
|
||||
code,
|
||||
reason,
|
||||
final_group,
|
||||
final_object,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.id.encode(w).await?;
|
||||
VarInt::from_u32(self.code).encode(w).await?;
|
||||
self.reason.encode(w).await?;
|
||||
|
||||
self.final_group.encode(w).await?;
|
||||
self.final_object.encode(w).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
25
moq-transport/src/message/unannounce.rs
Normal file
@@ -0,0 +1,25 @@
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError};
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the publisher to terminate an Announce.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Unannounce {
|
||||
// Echo back the namespace that was reset
|
||||
pub namespace: String,
|
||||
}
|
||||
|
||||
impl Unannounce {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let namespace = String::decode(r).await?;
|
||||
|
||||
Ok(Self { namespace })
|
||||
}
|
||||
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.namespace.encode(w).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
27
moq-transport/src/message/unsubscribe.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
|
||||
|
||||
use crate::coding::{AsyncRead, AsyncWrite};
|
||||
use crate::setup::Extensions;
|
||||
|
||||
/// Sent by the subscriber to terminate a Subscribe.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Unsubscribe {
|
||||
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
|
||||
|
||||
// The ID for this subscription.
|
||||
pub id: VarInt,
|
||||
}
|
||||
|
||||
impl Unsubscribe {
|
||||
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
|
||||
let id = VarInt::decode(r).await?;
|
||||
Ok(Self { id })
|
||||
}
|
||||
}
|
||||
|
||||
impl Unsubscribe {
|
||||
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
|
||||
self.id.encode(w).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
76
moq-transport/src/session/client.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
use super::{Control, Publisher, SessionError, Subscriber};
|
||||
use crate::{cache::broadcast, setup};
|
||||
use webtransport_quinn::Session;
|
||||
|
||||
/// An endpoint that connects to a URL to publish and/or consume live streams.
|
||||
pub struct Client {}
|
||||
|
||||
impl Client {
|
||||
/// Connect using an established WebTransport session, performing the MoQ handshake as a publisher.
|
||||
pub async fn publisher(session: Session, source: broadcast::Subscriber) -> Result<Publisher, SessionError> {
|
||||
let control = Self::send_setup(&session, setup::Role::Publisher).await?;
|
||||
let publisher = Publisher::new(session, control, source);
|
||||
Ok(publisher)
|
||||
}
|
||||
|
||||
/// Connect using an established WebTransport session, performing the MoQ handshake as a subscriber.
|
||||
pub async fn subscriber(session: Session, source: broadcast::Publisher) -> Result<Subscriber, SessionError> {
|
||||
let control = Self::send_setup(&session, setup::Role::Subscriber).await?;
|
||||
let subscriber = Subscriber::new(session, control, source);
|
||||
Ok(subscriber)
|
||||
}
|
||||
|
||||
// TODO support performing both roles
|
||||
/*
|
||||
pub async fn connect(self) -> anyhow::Result<(Publisher, Subscriber)> {
|
||||
self.connect_role(setup::Role::Both).await
|
||||
}
|
||||
*/
|
||||
|
||||
async fn send_setup(session: &Session, role: setup::Role) -> Result<Control, SessionError> {
|
||||
let mut control = session.open_bi().await?;
|
||||
|
||||
let versions: setup::Versions = [setup::Version::DRAFT_01, setup::Version::KIXEL_01].into();
|
||||
|
||||
let client = setup::Client {
|
||||
role,
|
||||
versions: versions.clone(),
|
||||
params: Default::default(),
|
||||
|
||||
// Offer all extensions
|
||||
extensions: setup::Extensions {
|
||||
object_expires: true,
|
||||
subscriber_id: true,
|
||||
subscribe_split: true,
|
||||
},
|
||||
};
|
||||
|
||||
log::debug!("sending client SETUP: {:?}", client);
|
||||
client.encode(&mut control.0).await?;
|
||||
|
||||
let mut server = setup::Server::decode(&mut control.1).await?;
|
||||
|
||||
log::debug!("received server SETUP: {:?}", server);
|
||||
|
||||
match server.version {
|
||||
setup::Version::DRAFT_01 => {
|
||||
// We always require this extension
|
||||
server.extensions.require_subscriber_id()?;
|
||||
|
||||
if server.role.is_publisher() {
|
||||
// We only require object expires if we're a subscriber, so we don't cache objects indefinitely.
|
||||
server.extensions.require_object_expires()?;
|
||||
}
|
||||
}
|
||||
setup::Version::KIXEL_01 => {
|
||||
// KIXEL_01 didn't support extensions; all were enabled.
|
||||
server.extensions = client.extensions.clone()
|
||||
}
|
||||
_ => return Err(SessionError::Version(versions, [server.version].into())),
|
||||
}
|
||||
|
||||
let control = Control::new(control.0, control.1, server.extensions);
|
||||
|
||||
Ok(control)
|
||||
}
|
||||
}
|
||||
45
moq-transport/src/session/control.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
// A helper class to guard sending control messages behind a Mutex.
|
||||
|
||||
use std::{fmt, sync::Arc};
|
||||
|
||||
use tokio::sync::Mutex;
|
||||
use webtransport_quinn::{RecvStream, SendStream};
|
||||
|
||||
use super::SessionError;
|
||||
use crate::{message::Message, setup::Extensions};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct Control {
|
||||
send: Arc<Mutex<SendStream>>,
|
||||
recv: Arc<Mutex<RecvStream>>,
|
||||
pub ext: Extensions,
|
||||
}
|
||||
|
||||
impl Control {
|
||||
pub fn new(send: SendStream, recv: RecvStream, ext: Extensions) -> Self {
|
||||
Self {
|
||||
send: Arc::new(Mutex::new(send)),
|
||||
recv: Arc::new(Mutex::new(recv)),
|
||||
ext,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn send<T: Into<Message> + fmt::Debug>(&self, msg: T) -> Result<(), SessionError> {
|
||||
let mut stream = self.send.lock().await;
|
||||
log::info!("sending message: {:?}", msg);
|
||||
msg.into()
|
||||
.encode(&mut *stream, &self.ext)
|
||||
.await
|
||||
.map_err(|e| SessionError::Unknown(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// It's likely a mistake to call this from two different tasks, but it's easier to just support it.
|
||||
pub async fn recv(&self) -> Result<Message, SessionError> {
|
||||
let mut stream = self.recv.lock().await;
|
||||
let msg = Message::decode(&mut *stream, &self.ext)
|
||||
.await
|
||||
.map_err(|e| SessionError::Unknown(e.to_string()))?;
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
107
moq-transport/src/session/error.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
use crate::{cache, coding, setup, MoqError, VarInt};
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum SessionError {
|
||||
#[error("webtransport error: {0}")]
|
||||
Session(#[from] webtransport_quinn::SessionError),
|
||||
|
||||
#[error("cache error: {0}")]
|
||||
Cache(#[from] cache::CacheError),
|
||||
|
||||
#[error("encode error: {0}")]
|
||||
Encode(#[from] coding::EncodeError),
|
||||
|
||||
#[error("decode error: {0}")]
|
||||
Decode(#[from] coding::DecodeError),
|
||||
|
||||
#[error("unsupported versions: client={0:?} server={1:?}")]
|
||||
Version(setup::Versions, setup::Versions),
|
||||
|
||||
#[error("incompatible roles: client={0:?} server={1:?}")]
|
||||
RoleIncompatible(setup::Role, setup::Role),
|
||||
|
||||
/// An error occurred while reading from the QUIC stream.
|
||||
#[error("failed to read from stream: {0}")]
|
||||
Read(#[from] webtransport_quinn::ReadError),
|
||||
|
||||
/// An error occurred while writing to the QUIC stream.
|
||||
#[error("failed to write to stream: {0}")]
|
||||
Write(#[from] webtransport_quinn::WriteError),
|
||||
|
||||
/// The role negotiated in the handshake was violated. For example, a publisher sent a SUBSCRIBE, or a subscriber sent an OBJECT.
|
||||
#[error("role violation: msg={0}")]
|
||||
RoleViolation(VarInt),
|
||||
|
||||
/// Our enforced stream mapping was disrespected.
|
||||
#[error("stream mapping conflict")]
|
||||
StreamMapping,
|
||||
|
||||
/// The priority was invalid.
|
||||
#[error("invalid priority: {0}")]
|
||||
InvalidPriority(VarInt),
|
||||
|
||||
/// The size was invalid.
|
||||
#[error("invalid size: {0}")]
|
||||
InvalidSize(VarInt),
|
||||
|
||||
/// A required extension was not offered.
|
||||
#[error("required extension not offered: {0:?}")]
|
||||
RequiredExtension(VarInt),
|
||||
|
||||
/// Some VarInt was too large and we were too lazy to handle it
|
||||
#[error("varint bounds exceeded")]
|
||||
BoundsExceeded(#[from] coding::BoundsExceeded),
|
||||
|
||||
/// An unclassified error because I'm lazy. TODO classify these errors
|
||||
#[error("unknown error: {0}")]
|
||||
Unknown(String),
|
||||
}
|
||||
|
||||
impl MoqError for SessionError {
|
||||
/// An integer code that is sent over the wire.
|
||||
fn code(&self) -> u32 {
|
||||
match self {
|
||||
Self::Cache(err) => err.code(),
|
||||
Self::RoleIncompatible(..) => 406,
|
||||
Self::RoleViolation(..) => 405,
|
||||
Self::StreamMapping => 409,
|
||||
Self::Unknown(_) => 500,
|
||||
Self::Write(_) => 501,
|
||||
Self::Read(_) => 502,
|
||||
Self::Session(_) => 503,
|
||||
Self::Version(..) => 406,
|
||||
Self::Encode(_) => 500,
|
||||
Self::Decode(_) => 500,
|
||||
Self::InvalidPriority(_) => 400,
|
||||
Self::InvalidSize(_) => 400,
|
||||
Self::RequiredExtension(_) => 426,
|
||||
Self::BoundsExceeded(_) => 500,
|
||||
}
|
||||
}
|
||||
|
||||
/// A reason that is sent over the wire.
|
||||
fn reason(&self) -> String {
|
||||
match self {
|
||||
Self::Cache(err) => err.reason(),
|
||||
Self::RoleViolation(kind) => format!("role violation for message type {:?}", kind),
|
||||
Self::RoleIncompatible(client, server) => {
|
||||
format!(
|
||||
"role incompatible: client wanted {:?} but server wanted {:?}",
|
||||
client, server
|
||||
)
|
||||
}
|
||||
Self::Read(err) => format!("read error: {}", err),
|
||||
Self::Write(err) => format!("write error: {}", err),
|
||||
Self::Session(err) => format!("session error: {}", err),
|
||||
Self::Unknown(err) => format!("unknown error: {}", err),
|
||||
Self::Version(client, server) => format!("unsupported versions: client={:?} server={:?}", client, server),
|
||||
Self::Encode(err) => format!("encode error: {}", err),
|
||||
Self::Decode(err) => format!("decode error: {}", err),
|
||||
Self::StreamMapping => "stream mapping conflict".to_owned(),
|
||||
Self::InvalidPriority(priority) => format!("invalid priority: {}", priority),
|
||||
Self::InvalidSize(size) => format!("invalid size: {}", size),
|
||||
Self::RequiredExtension(id) => format!("required extension was missing: {:?}", id),
|
||||
Self::BoundsExceeded(_) => "varint bounds exceeded".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
27
moq-transport/src/session/mod.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
//! A MoQ Transport session, on top of a WebTransport session, on top of a QUIC connection.
|
||||
//!
|
||||
//! The handshake is relatively simple but split into different steps.
|
||||
//! All of these handshakes differ slightly depending on whether the endpoint is a client or a server.
|
||||
//! 1. Complete the QUIC handshake.
|
||||
//! 2. Complete the WebTransport handshake.
|
||||
//! 3. Complete the MoQ handshake.
|
||||
//!
|
||||
//! Use [Client] or [Server] for the MoQ handshake depending on the endpoint.
|
||||
//! Then, decide if you want to create a [Publisher] or [Subscriber], or both (TODO).
|
||||
//!
|
||||
//! A [Publisher] can announce broadcasts, which will automatically be served over the network.
|
||||
//! A [Subscriber] can subscribe to broadcasts, which will automatically be served over the network.
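//!
//! A rough server-side sketch (not part of the original commit; `webtransport` and
//! `source` are placeholder names, with `source` being a broadcast handle from the
//! `crate::cache::broadcast` module):
//! ```ignore
//! // Steps 1 and 2 happen elsewhere and produce a webtransport_quinn::Session.
//! let request = Server::accept(webtransport).await?;
//!
//! // Step 3: accept the session as a publisher, serving the given broadcast.
//! let publisher = request.publisher(source).await?;
//! publisher.run().await?;
//! ```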
|
||||
|
||||
mod client;
|
||||
mod control;
|
||||
mod error;
|
||||
mod publisher;
|
||||
mod server;
|
||||
mod subscriber;
|
||||
|
||||
pub use client::*;
|
||||
pub(crate) use control::*;
|
||||
pub use error::*;
|
||||
pub use publisher::*;
|
||||
pub use server::*;
|
||||
pub use subscriber::*;
|
||||
237
moq-transport/src/session/publisher.rs
Normal file
@@ -0,0 +1,237 @@
|
||||
use std::{
|
||||
collections::{hash_map, HashMap},
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
|
||||
use tokio::task::AbortHandle;
|
||||
use webtransport_quinn::Session;
|
||||
|
||||
use crate::{
|
||||
cache::{broadcast, segment, track, CacheError},
|
||||
message,
|
||||
message::Message,
|
||||
MoqError, VarInt,
|
||||
};
|
||||
|
||||
use super::{Control, SessionError};
|
||||
|
||||
/// Serves broadcasts over the network, automatically handling subscriptions and caching.
|
||||
// TODO Clone specific fields when a task actually needs it.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Publisher {
|
||||
// A map of active subscriptions, containing an abort handle to cancel them.
|
||||
subscribes: Arc<Mutex<HashMap<VarInt, AbortHandle>>>,
|
||||
webtransport: Session,
|
||||
control: Control,
|
||||
source: broadcast::Subscriber,
|
||||
}
|
||||
|
||||
impl Publisher {
|
||||
pub(crate) fn new(webtransport: Session, control: Control, source: broadcast::Subscriber) -> Self {
|
||||
Self {
|
||||
webtransport,
|
||||
control,
|
||||
subscribes: Default::default(),
|
||||
source,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO Serve a broadcast without sending an ANNOUNCE.
|
||||
// fn serve(&mut self, broadcast: broadcast::Subscriber) -> Result<(), SessionError> {
|
||||
|
||||
// TODO Wait until the next subscribe that doesn't route to an ANNOUNCE.
|
||||
// pub async fn subscribed(&mut self) -> Result<track::Producer, SessionError> {
|
||||
|
||||
pub async fn run(mut self) -> Result<(), SessionError> {
|
||||
let res = self.run_inner().await;
|
||||
|
||||
// Terminate all active subscribes on error.
|
||||
self.subscribes
|
||||
.lock()
|
||||
.unwrap()
|
||||
.drain()
|
||||
.for_each(|(_, abort)| abort.abort());
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
pub async fn run_inner(&mut self) -> Result<(), SessionError> {
|
||||
loop {
|
||||
tokio::select! {
|
||||
stream = self.webtransport.accept_uni() => {
|
||||
stream?;
|
||||
return Err(SessionError::RoleViolation(VarInt::ZERO));
|
||||
}
|
||||
// NOTE: this is not cancel safe, but it's fine since the other branches are fatal.
|
||||
msg = self.control.recv() => {
|
||||
let msg = msg?;
|
||||
|
||||
log::info!("message received: {:?}", msg);
|
||||
if let Err(err) = self.recv_message(&msg).await {
|
||||
log::warn!("message error: {:?} {:?}", err, msg);
|
||||
}
|
||||
},
|
||||
// No more broadcasts are available.
|
||||
err = self.source.closed() => {
|
||||
self.webtransport.close(err.code(), err.reason().as_bytes());
|
||||
return Ok(());
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn recv_message(&mut self, msg: &Message) -> Result<(), SessionError> {
|
||||
match msg {
|
||||
Message::AnnounceOk(msg) => self.recv_announce_ok(msg).await,
|
||||
Message::AnnounceError(msg) => self.recv_announce_error(msg).await,
|
||||
Message::Subscribe(msg) => self.recv_subscribe(msg).await,
|
||||
Message::Unsubscribe(msg) => self.recv_unsubscribe(msg).await,
|
||||
_ => Err(SessionError::RoleViolation(msg.id())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn recv_announce_ok(&mut self, _msg: &message::AnnounceOk) -> Result<(), SessionError> {
|
||||
// We didn't send an announce.
|
||||
Err(CacheError::NotFound.into())
|
||||
}
|
||||
|
||||
async fn recv_announce_error(&mut self, _msg: &message::AnnounceError) -> Result<(), SessionError> {
|
||||
// We didn't send an announce.
|
||||
Err(CacheError::NotFound.into())
|
||||
}
|
||||
|
||||
async fn recv_subscribe(&mut self, msg: &message::Subscribe) -> Result<(), SessionError> {
|
||||
// Assume that the subscribe ID is unique for now.
|
||||
let abort = match self.start_subscribe(msg.clone()) {
|
||||
Ok(abort) => abort,
|
||||
Err(err) => return self.reset_subscribe(msg.id, err).await,
|
||||
};
|
||||
|
||||
// Insert the abort handle into the lookup table.
|
||||
match self.subscribes.lock().unwrap().entry(msg.id) {
|
||||
hash_map::Entry::Occupied(_) => return Err(CacheError::Duplicate.into()), // TODO fatal, because we already started the task
|
||||
hash_map::Entry::Vacant(entry) => entry.insert(abort),
|
||||
};
|
||||
|
||||
self.control
|
||||
.send(message::SubscribeOk {
|
||||
id: msg.id,
|
||||
expires: VarInt::ZERO,
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn reset_subscribe<E: MoqError>(&mut self, id: VarInt, err: E) -> Result<(), SessionError> {
|
||||
let msg = message::SubscribeReset {
|
||||
id,
|
||||
code: err.code(),
|
||||
reason: err.reason(),
|
||||
|
||||
// TODO properly populate these
|
||||
// But first: https://github.com/moq-wg/moq-transport/issues/313
|
||||
final_group: VarInt::ZERO,
|
||||
final_object: VarInt::ZERO,
|
||||
};
|
||||
|
||||
self.control.send(msg).await
|
||||
}
|
||||
|
||||
fn start_subscribe(&mut self, msg: message::Subscribe) -> Result<AbortHandle, SessionError> {
|
||||
// We currently don't use the namespace field in SUBSCRIBE
|
||||
// Make sure the namespace is empty if it's provided.
|
||||
if msg.namespace.as_ref().map_or(false, |namespace| !namespace.is_empty()) {
|
||||
return Err(CacheError::NotFound.into());
|
||||
}
|
||||
|
||||
let mut track = self.source.get_track(&msg.name)?;
|
||||
|
||||
// TODO only clone the fields we need
|
||||
let mut this = self.clone();
|
||||
|
||||
let handle = tokio::spawn(async move {
|
||||
log::info!("serving track: name={}", track.name);
|
||||
|
||||
let res = this.run_subscribe(msg.id, &mut track).await;
|
||||
if let Err(err) = &res {
|
||||
log::warn!("failed to serve track: name={} err={:#?}", track.name, err);
|
||||
}
|
||||
|
||||
// Make sure we send a reset at the end.
|
||||
let err = res.err().unwrap_or(CacheError::Closed.into());
|
||||
this.reset_subscribe(msg.id, err).await.ok();
|
||||
|
||||
// We're all done, so clean up the abort handle.
|
||||
this.subscribes.lock().unwrap().remove(&msg.id);
|
||||
});
|
||||
|
||||
Ok(handle.abort_handle())
|
||||
}
|
||||
|
||||
async fn run_subscribe(&self, id: VarInt, track: &mut track::Subscriber) -> Result<(), SessionError> {
|
||||
// TODO add an Ok method to track::Publisher so we can send SUBSCRIBE_OK
|
||||
|
||||
while let Some(mut segment) = track.segment().await? {
|
||||
// TODO only clone the fields we need
|
||||
let this = self.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(err) = this.run_segment(id, &mut segment).await {
|
||||
log::warn!("failed to serve segment: {:?}", err)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_segment(&self, id: VarInt, segment: &mut segment::Subscriber) -> Result<(), SessionError> {
|
||||
log::trace!("serving group: {:?}", segment);
|
||||
|
||||
let mut stream = self.webtransport.open_uni().await?;
|
||||
|
||||
// Convert the u32 to an i32, since the Quinn set_priority is signed.
|
||||
let priority = (segment.priority as i64 - i32::MAX as i64) as i32;
|
||||
stream.set_priority(priority).ok();
|
||||
|
||||
while let Some(mut fragment) = segment.fragment().await? {
|
||||
log::trace!("serving fragment: {:?}", fragment);
|
||||
|
||||
let object = message::Object {
|
||||
track: id,
|
||||
|
||||
// Properties of the segment
|
||||
group: segment.sequence,
|
||||
priority: segment.priority,
|
||||
expires: segment.expires,
|
||||
|
||||
// Properties of the fragment
|
||||
sequence: fragment.sequence,
|
||||
size: fragment.size.map(VarInt::try_from).transpose()?,
|
||||
};
|
||||
|
||||
object
|
||||
.encode(&mut stream, &self.control.ext)
|
||||
.await
|
||||
.map_err(|e| SessionError::Unknown(e.to_string()))?;
|
||||
|
||||
while let Some(chunk) = fragment.chunk().await? {
|
||||
//log::trace!("writing chunk: {:?}", chunk);
|
||||
stream.write_all(&chunk).await?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn recv_unsubscribe(&mut self, msg: &message::Unsubscribe) -> Result<(), SessionError> {
|
||||
let abort = self
|
||||
.subscribes
|
||||
.lock()
|
||||
.unwrap()
|
||||
.remove(&msg.id)
|
||||
.ok_or(CacheError::NotFound)?;
|
||||
abort.abort();
|
||||
|
||||
self.reset_subscribe(msg.id, CacheError::Stop).await
|
||||
}
|
||||
}
|
||||
116
moq-transport/src/session/server.rs
Normal file
@@ -0,0 +1,116 @@
|
||||
use super::{Control, Publisher, SessionError, Subscriber};
|
||||
use crate::{cache::broadcast, setup};
|
||||
|
||||
use webtransport_quinn::{RecvStream, SendStream, Session};
|
||||
|
||||
/// An endpoint that accepts connections, publishing and/or consuming live streams.
|
||||
pub struct Server {}
|
||||
|
||||
impl Server {
|
||||
/// Accept an established WebTransport session, performing the MoQ handshake.
|
||||
///
|
||||
/// This returns a [Request] half-way through the handshake that allows the application to accept or deny the session.
|
||||
pub async fn accept(session: Session) -> Result<Request, SessionError> {
|
||||
let mut control = session.accept_bi().await?;
|
||||
|
||||
let mut client = setup::Client::decode(&mut control.1).await?;
|
||||
|
||||
log::debug!("received client SETUP: {:?}", client);
|
||||
|
||||
if client.versions.contains(&setup::Version::DRAFT_01) {
|
||||
// We always require subscriber ID.
|
||||
client.extensions.require_subscriber_id()?;
|
||||
|
||||
// We require OBJECT_EXPIRES for publishers only.
|
||||
if client.role.is_publisher() {
|
||||
client.extensions.require_object_expires()?;
|
||||
}
|
||||
|
||||
// We don't require SUBSCRIBE_SPLIT since it's easy enough to support, but it's clearly an oversight.
|
||||
// client.extensions.require(&Extension::SUBSCRIBE_SPLIT)?;
|
||||
} else if client.versions.contains(&setup::Version::KIXEL_01) {
|
||||
// Extensions didn't exist in KIXEL_01, so we set them manually.
|
||||
client.extensions = setup::Extensions {
|
||||
object_expires: true,
|
||||
subscriber_id: true,
|
||||
subscribe_split: true,
|
||||
};
|
||||
} else {
|
||||
return Err(SessionError::Version(
|
||||
client.versions,
|
||||
[setup::Version::DRAFT_01, setup::Version::KIXEL_01].into(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(Request {
|
||||
session,
|
||||
client,
|
||||
control,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A partially complete MoQ Transport handshake.
|
||||
pub struct Request {
|
||||
session: Session,
|
||||
client: setup::Client,
|
||||
control: (SendStream, RecvStream),
|
||||
}
|
||||
|
||||
impl Request {
|
||||
/// Accept the session as a publisher, using the provided broadcast to serve subscriptions.
|
||||
pub async fn publisher(mut self, source: broadcast::Subscriber) -> Result<Publisher, SessionError> {
|
||||
let setup = self.setup(setup::Role::Publisher)?;
|
||||
setup.encode(&mut self.control.0).await?;
|
||||
|
||||
let control = Control::new(self.control.0, self.control.1, setup.extensions);
|
||||
let publisher = Publisher::new(self.session, control, source);
|
||||
Ok(publisher)
|
||||
}
|
||||
|
||||
/// Accept the session as a subscriber only.
|
||||
pub async fn subscriber(mut self, source: broadcast::Publisher) -> Result<Subscriber, SessionError> {
|
||||
let setup = self.setup(setup::Role::Subscriber)?;
|
||||
setup.encode(&mut self.control.0).await?;
|
||||
|
||||
let control = Control::new(self.control.0, self.control.1, setup.extensions);
|
||||
let subscriber = Subscriber::new(self.session, control, source);
|
||||
Ok(subscriber)
|
||||
}
|
||||
|
||||
// TODO Accept the session and perform both roles.
|
||||
/*
|
||||
pub async fn accept(self) -> anyhow::Result<(Publisher, Subscriber)> {
|
||||
self.ok(setup::Role::Both).await
|
||||
}
|
||||
*/
|
||||
|
||||
fn setup(&mut self, role: setup::Role) -> Result<setup::Server, SessionError> {
|
||||
let server = setup::Server {
|
||||
role,
|
||||
version: setup::Version::DRAFT_01,
|
||||
extensions: self.client.extensions.clone(),
|
||||
params: Default::default(),
|
||||
};
|
||||
|
||||
log::debug!("sending server SETUP: {:?}", server);
|
||||
|
||||
// We need to make sure we support the opposite of the client's role.
|
||||
// ex. if the client is a publisher, we must be a subscriber ONLY.
|
||||
if !self.client.role.is_compatible(server.role) {
|
||||
return Err(SessionError::RoleIncompatible(self.client.role, server.role));
|
||||
}
|
||||
|
||||
Ok(server)
|
||||
}
|
||||
|
||||
/// Reject the request, closing the WebTransport session.
|
||||
pub fn reject(self, code: u32) {
|
||||
self.session.close(code, b"")
|
||||
}
|
||||
|
||||
/// The role advertised by the client.
|
||||
pub fn role(&self) -> setup::Role {
|
||||
self.client.role
|
||||
}
|
||||
}
|
||||
211
moq-transport/src/session/subscriber.rs
Normal file
@@ -0,0 +1,211 @@
|
||||
use webtransport_quinn::{RecvStream, Session};
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{atomic, Arc, Mutex},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
cache::{broadcast, segment, track, CacheError},
|
||||
coding::DecodeError,
|
||||
message,
|
||||
message::Message,
|
||||
session::{Control, SessionError},
|
||||
VarInt,
|
||||
};
|
||||
|
||||
/// Receives broadcasts over the network, automatically handling subscriptions and caching.
|
||||
// TODO Clone specific fields when a task actually needs it.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Subscriber {
|
||||
// The webtransport session.
|
||||
webtransport: Session,
|
||||
|
||||
// The list of active subscriptions, each guarded by a mutex.
|
||||
subscribes: Arc<Mutex<HashMap<VarInt, track::Publisher>>>,
|
||||
|
||||
// The sequence number for the next subscription.
|
||||
next: Arc<atomic::AtomicU32>,
|
||||
|
||||
// A channel for sending messages.
|
||||
control: Control,
|
||||
|
||||
// All unknown subscribes come here.
|
||||
source: broadcast::Publisher,
|
||||
}
|
||||
|
||||
impl Subscriber {
|
||||
pub(crate) fn new(webtransport: Session, control: Control, source: broadcast::Publisher) -> Self {
|
||||
Self {
|
||||
webtransport,
|
||||
subscribes: Default::default(),
|
||||
next: Default::default(),
|
||||
control,
|
||||
source,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run(self) -> Result<(), SessionError> {
|
||||
let inbound = self.clone().run_inbound();
|
||||
let streams = self.clone().run_streams();
|
||||
let source = self.clone().run_source();
|
||||
|
||||
// Return the first error.
|
||||
tokio::select! {
|
||||
res = inbound => res,
|
||||
res = streams => res,
|
||||
res = source => res,
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_inbound(mut self) -> Result<(), SessionError> {
|
||||
loop {
|
||||
let msg = self.control.recv().await?;
|
||||
|
||||
log::info!("message received: {:?}", msg);
|
||||
if let Err(err) = self.recv_message(&msg) {
|
||||
log::warn!("message error: {:?} {:?}", err, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn recv_message(&mut self, msg: &Message) -> Result<(), SessionError> {
|
||||
match msg {
|
||||
Message::Announce(_) => Ok(()), // don't care
|
||||
Message::Unannounce(_) => Ok(()), // also don't care
|
||||
Message::SubscribeOk(_msg) => Ok(()), // don't care
|
||||
Message::SubscribeReset(msg) => self.recv_subscribe_error(msg.id, CacheError::Reset(msg.code)),
|
||||
Message::SubscribeFin(msg) => self.recv_subscribe_error(msg.id, CacheError::Closed),
|
||||
Message::SubscribeError(msg) => self.recv_subscribe_error(msg.id, CacheError::Reset(msg.code)),
|
||||
Message::GoAway(_msg) => unimplemented!("GOAWAY"),
|
||||
_ => Err(SessionError::RoleViolation(msg.id())),
|
||||
}
|
||||
}
|
||||
|
||||
fn recv_subscribe_error(&mut self, id: VarInt, err: CacheError) -> Result<(), SessionError> {
|
||||
let mut subscribes = self.subscribes.lock().unwrap();
|
||||
let subscribe = subscribes.remove(&id).ok_or(CacheError::NotFound)?;
|
||||
subscribe.close(err)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_streams(self) -> Result<(), SessionError> {
|
||||
loop {
|
||||
// Accept all incoming unidirectional streams.
|
||||
let stream = self.webtransport.accept_uni().await?;
|
||||
let this = self.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(err) = this.run_stream(stream).await {
|
||||
log::warn!("failed to receive stream: err={:#?}", err);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_stream(self, mut stream: RecvStream) -> Result<(), SessionError> {
|
||||
// Decode the object on the data stream.
|
||||
let mut object = message::Object::decode(&mut stream, &self.control.ext)
|
||||
.await
|
||||
.map_err(|e| SessionError::Unknown(e.to_string()))?;
|
||||
|
||||
log::trace!("first object: {:?}", object);
|
||||
|
||||
// A new scope is needed because the async compiler is dumb
|
||||
let mut segment = {
|
||||
let mut subscribes = self.subscribes.lock().unwrap();
|
||||
let track = subscribes.get_mut(&object.track).ok_or(CacheError::NotFound)?;
|
||||
|
||||
track.create_segment(segment::Info {
|
||||
sequence: object.group,
|
||||
priority: object.priority,
|
||||
expires: object.expires,
|
||||
})?
|
||||
};
|
||||
|
||||
log::trace!("received segment: {:?}", segment);
|
||||
|
||||
// Create the first fragment
|
||||
let mut fragment = segment.push_fragment(object.sequence, object.size.map(usize::from))?;
|
||||
let mut remain = object.size.map(usize::from);
|
||||
|
||||
loop {
|
||||
if let Some(0) = remain {
|
||||
// Decode the next object from the stream.
|
||||
let next = match message::Object::decode(&mut stream, &self.control.ext).await {
|
||||
Ok(next) => next,
|
||||
|
||||
// No more objects
|
||||
Err(DecodeError::Final) => break,
|
||||
|
||||
// Unknown error
|
||||
Err(err) => return Err(err.into()),
|
||||
};
|
||||
|
||||
log::trace!("next object: {:?}", object);
|
||||
|
||||
// NOTE: This is a custom restriction; not part of the moq-transport draft.
|
||||
// We require every OBJECT to contain the same priority since prioritization is done per-stream.
|
||||
// We also require every OBJECT to contain the same group so we know when the group ends, and can detect gaps.
|
||||
if next.priority != object.priority || next.group != object.group {
|
||||
return Err(SessionError::StreamMapping);
|
||||
}
|
||||
|
||||
object = next;
|
||||
|
||||
// Create a new object.
|
||||
fragment = segment.push_fragment(object.sequence, object.size.map(usize::from))?;
|
||||
remain = object.size.map(usize::from);
|
||||
|
||||
log::trace!("next fragment: {:?}", fragment);
|
||||
}
|
||||
|
||||
match stream.read_chunk(remain.unwrap_or(usize::MAX), true).await? {
|
||||
// Unbounded object has ended
|
||||
None if remain.is_none() => break,
|
||||
|
||||
// Bounded object ended early, oops.
|
||||
None => return Err(DecodeError::UnexpectedEnd.into()),
|
||||
|
||||
// NOTE: This does not make a copy!
|
||||
// Bytes are immutable and ref counted.
|
||||
Some(data) => {
|
||||
remain = remain.map(|r| r - data.bytes.len());
|
||||
|
||||
log::trace!("next chunk: {:?}", data);
|
||||
fragment.chunk(data.bytes)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_source(mut self) -> Result<(), SessionError> {
|
||||
loop {
|
||||
// NOTE: This returns Closed when the source is closed.
|
||||
let track = self.source.next_track().await?;
|
||||
let name = track.name.clone();
|
||||
|
||||
let id = VarInt::from_u32(self.next.fetch_add(1, atomic::Ordering::SeqCst));
|
||||
self.subscribes.lock().unwrap().insert(id, track);
|
||||
|
||||
let msg = message::Subscribe {
|
||||
id,
|
||||
namespace: self.control.ext.subscribe_split.then(|| "".to_string()),
|
||||
name,
|
||||
|
||||
// TODO correctly support these
|
||||
start_group: message::SubscribeLocation::Latest(VarInt::ZERO),
|
||||
start_object: message::SubscribeLocation::Absolute(VarInt::ZERO),
|
||||
end_group: message::SubscribeLocation::None,
|
||||
end_object: message::SubscribeLocation::None,
|
||||
|
||||
params: Default::default(),
|
||||
};
|
||||
|
||||
self.control.send(msg).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
72
moq-transport/src/setup/client.rs
Normal file
@@ -0,0 +1,72 @@
use super::{Extensions, Role, Versions};
use crate::{
	coding::{Decode, DecodeError, Encode, EncodeError, Params},
	VarInt,
};

use crate::coding::{AsyncRead, AsyncWrite};

/// Sent by the client to set up the session.
// NOTE: This is not a message type, but rather the control stream header.
// Proposal: https://github.com/moq-wg/moq-transport/issues/138
#[derive(Debug)]
pub struct Client {
	/// The list of supported versions in preferred order.
	pub versions: Versions,

	/// Indicate if the client is a publisher, a subscriber, or both.
	pub role: Role,

	/// A list of known/offered extensions.
	pub extensions: Extensions,

	/// Unknown parameters.
	pub params: Params,
}

impl Client {
	/// Decode a client setup message.
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let typ = VarInt::decode(r).await?;
		if typ.into_inner() != 0x40 {
			return Err(DecodeError::InvalidMessage(typ));
		}

		let versions = Versions::decode(r).await?;
		let mut params = Params::decode(r).await?;

		let role = params
			.get::<Role>(VarInt::from_u32(0))
			.await?
			.ok_or(DecodeError::MissingParameter)?;

		// Make sure the PATH parameter isn't used
		// TODO: This assumes WebTransport support only
		if params.has(VarInt::from_u32(1)) {
			return Err(DecodeError::InvalidParameter);
		}

		let extensions = Extensions::load(&mut params).await?;

		Ok(Self {
			versions,
			role,
			extensions,
			params,
		})
	}

	/// Encode a client setup message.
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		VarInt::from_u32(0x40).encode(w).await?;
		self.versions.encode(w).await?;

		let mut params = self.params.clone();
		params.set(VarInt::from_u32(0), self.role).await?;
		self.extensions.store(&mut params).await?;

		params.encode(w).await?;

		Ok(())
	}
}
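A hedged sketch of how this header round-trips. The test below is not part of the file; it assumes the crate's `AsyncRead`/`AsyncWrite` bounds accept `tokio::io::DuplexStream`, and that `Params` implements `Default`, as its use via `Default::default()` elsewhere in the codebase suggests.

#[cfg(test)]
mod setup_sketch {
	use super::*;
	use crate::setup::Version;

	#[tokio::test]
	async fn client_roundtrip() {
		// In-memory pipe standing in for the control stream.
		let (mut tx, mut rx) = tokio::io::duplex(1024);

		let sent = Client {
			versions: [Version::DRAFT_01, Version::KIXEL_01].into(),
			role: Role::Both,
			extensions: Extensions::default(),
			params: Default::default(),
		};
		sent.encode(&mut tx).await.expect("encode");

		// The peer reads the 0x40 header, the version list, and the params.
		let received = Client::decode(&mut rx).await.expect("decode");
		assert_eq!(received.role, Role::Both);
		assert!(received.versions.contains(&Version::DRAFT_01));
	}
}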
84
moq-transport/src/setup/extension.rs
Normal file
@@ -0,0 +1,84 @@
use tokio::io::{AsyncRead, AsyncWrite};

use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params};
use crate::session::SessionError;
use crate::VarInt;
use paste::paste;

/// This is a custom extension scheme to allow/require draft PRs.
///
/// By convention, the extension number is the PR number + 0xe0000.

macro_rules! extensions {
	{$($name:ident = $val:expr,)*} => {
		#[derive(Clone, Default, Debug)]
		pub struct Extensions {
			$(
				pub $name: bool,
			)*
		}

		impl Extensions {
			pub async fn load(params: &mut Params) -> Result<Self, DecodeError> {
				let mut extensions = Self::default();

				$(
					if let Some(_) = params.get::<ExtensionExists>(VarInt::from_u32($val)).await? {
						extensions.$name = true
					}
				)*

				Ok(extensions)
			}

			pub async fn store(&self, params: &mut Params) -> Result<(), EncodeError> {
				$(
					if self.$name {
						params.set(VarInt::from_u32($val), ExtensionExists{}).await?;
					}
				)*

				Ok(())
			}

			paste! {
				$(
					pub fn [<require_ $name>](&self) -> Result<(), SessionError> {
						match self.$name {
							true => Ok(()),
							false => Err(SessionError::RequiredExtension(VarInt::from_u32($val))),
						}
					}
				)*
			}
		}
	}
}

struct ExtensionExists;

#[async_trait::async_trait]
impl Decode for ExtensionExists {
	async fn decode<R: AsyncRead>(_r: &mut R) -> Result<Self, DecodeError> {
		Ok(ExtensionExists {})
	}
}

#[async_trait::async_trait]
impl Encode for ExtensionExists {
	async fn encode<W: AsyncWrite>(&self, _w: &mut W) -> Result<(), EncodeError> {
		Ok(())
	}
}

extensions! {
	// required for publishers: OBJECT contains expires VarInt in seconds: https://github.com/moq-wg/moq-transport/issues/249
	// TODO write up a PR
	object_expires = 0xe00f9,

	// required: SUBSCRIBE chooses track ID: https://github.com/moq-wg/moq-transport/pull/258
	subscriber_id = 0xe0102,

	// optional: SUBSCRIBE contains namespace/name tuple: https://github.com/moq-wg/moq-transport/pull/277
	subscribe_split = 0xe0115,
}
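A sketch of how the generated type is meant to be used around SETUP. It is not part of the file and assumes `Params::default()` exists (its use as `Default::default()` elsewhere suggests it does):

#[cfg(test)]
mod extension_sketch {
	use super::*;

	#[tokio::test]
	async fn store_load_require() {
		// Advertise the draft PRs we implement...
		let mut offered = Extensions::default();
		offered.subscriber_id = true; // PR #258
		offered.subscribe_split = true; // PR #277

		// ...encode them as "exists" params, exactly as Client/Server::encode do...
		let mut params = Params::default();
		offered.store(&mut params).await.expect("store");

		// ...and let the peer rebuild and enforce them.
		let received = Extensions::load(&mut params).await.expect("load");
		assert!(received.subscriber_id && received.subscribe_split);
		assert!(!received.object_expires);
		received.require_subscriber_id().expect("extension required");
	}
}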
17
moq-transport/src/setup/mod.rs
Normal file
@@ -0,0 +1,17 @@
//! Messages used for the MoQ Transport handshake.
//!
//! After establishing the WebTransport session, the client creates a bidirectional QUIC stream.
//! The client sends the [Client] message and the server responds with the [Server] message.
//! Both sides negotiate the [Version] and [Role].

mod client;
mod extension;
mod role;
mod server;
mod version;

pub use client::*;
pub use extension::*;
pub use role::*;
pub use server::*;
pub use version::*;
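The handshake described above, sketched from the client side. This is illustrative only: it assumes the control stream satisfies the crate's `AsyncRead`/`AsyncWrite` bounds, and that `SessionError` converts from the coding errors (the session code's use of `.into()` on `DecodeError` suggests it does).

use crate::coding::{AsyncRead, AsyncWrite};
use crate::session::SessionError;
use crate::setup::{Client, Extensions, Role, Server, Version};

/// Offer our versions and role, then accept whatever the server selects.
async fn client_handshake<S: AsyncRead + AsyncWrite>(mut control: S) -> Result<Server, SessionError> {
	let hello = Client {
		versions: [Version::DRAFT_01, Version::KIXEL_01].into(),
		role: Role::Both,
		extensions: Extensions::default(),
		params: Default::default(),
	};
	hello.encode(&mut control).await?;

	// The server answers with exactly one version and its own role.
	let server = Server::decode(&mut control).await?;
	assert!(hello.versions.contains(&server.version), "server picked an unoffered version");
	assert!(hello.role.is_compatible(server.role), "roles are incompatible");
	Ok(server)
}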
74
moq-transport/src/setup/role.rs
Normal file
@@ -0,0 +1,74 @@
use crate::coding::{AsyncRead, AsyncWrite};

use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};

/// Indicates the endpoint is a publisher, subscriber, or both.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Role {
	Publisher,
	Subscriber,
	Both,
}

impl Role {
	/// Returns true if the role is publisher.
	pub fn is_publisher(&self) -> bool {
		match self {
			Self::Publisher | Self::Both => true,
			Self::Subscriber => false,
		}
	}

	/// Returns true if the role is a subscriber.
	pub fn is_subscriber(&self) -> bool {
		match self {
			Self::Subscriber | Self::Both => true,
			Self::Publisher => false,
		}
	}

	/// Returns true if two endpoints are compatible.
	pub fn is_compatible(&self, other: Role) -> bool {
		self.is_publisher() == other.is_subscriber() && self.is_subscriber() == other.is_publisher()
	}
}

impl From<Role> for VarInt {
	fn from(r: Role) -> Self {
		VarInt::from_u32(match r {
			Role::Publisher => 0x1,
			Role::Subscriber => 0x2,
			Role::Both => 0x3,
		})
	}
}

impl TryFrom<VarInt> for Role {
	type Error = DecodeError;

	fn try_from(v: VarInt) -> Result<Self, Self::Error> {
		match v.into_inner() {
			0x1 => Ok(Self::Publisher),
			0x2 => Ok(Self::Subscriber),
			0x3 => Ok(Self::Both),
			_ => Err(DecodeError::InvalidRole(v)),
		}
	}
}

#[async_trait::async_trait]
impl Decode for Role {
	/// Decode the role.
	async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let v = VarInt::decode(r).await?;
		v.try_into()
	}
}

#[async_trait::async_trait]
impl Encode for Role {
	/// Encode the role.
	async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		VarInt::from(*self).encode(w).await
	}
}
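`is_compatible` as written is stricter than it may look: each side must want exactly what the other offers, so `Both` only pairs with `Both`. A small illustrative test (not part of the file), following the formula above:

#[cfg(test)]
mod role_sketch {
	use super::Role;

	#[test]
	fn compatibility() {
		// A pure publisher needs a pure subscriber, and vice versa.
		assert!(Role::Publisher.is_compatible(Role::Subscriber));
		assert!(Role::Subscriber.is_compatible(Role::Publisher));

		// Two publishers (or two subscribers) have nothing to exchange.
		assert!(!Role::Publisher.is_compatible(Role::Publisher));
		assert!(!Role::Subscriber.is_compatible(Role::Subscriber));

		// Both only matches a peer that also does both.
		assert!(Role::Both.is_compatible(Role::Both));
		assert!(!Role::Both.is_compatible(Role::Publisher));
	}
}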
71
moq-transport/src/setup/server.rs
Normal file
@@ -0,0 +1,71 @@
use super::{Extensions, Role, Version};
use crate::{
	coding::{Decode, DecodeError, Encode, EncodeError, Params},
	VarInt,
};

use crate::coding::{AsyncRead, AsyncWrite};

/// Sent by the server in response to a client setup.
// NOTE: This is not a message type, but rather the control stream header.
// Proposal: https://github.com/moq-wg/moq-transport/issues/138
#[derive(Debug)]
pub struct Server {
	/// The version chosen by the server, from the client's list of supported versions.
	pub version: Version,

	/// Indicate if the server is a publisher, a subscriber, or both.
	// Proposal: moq-wg/moq-transport#151
	pub role: Role,

	/// Custom extensions.
	pub extensions: Extensions,

	/// Unknown parameters.
	pub params: Params,
}

impl Server {
	/// Decode the server setup.
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let typ = VarInt::decode(r).await?;
		if typ.into_inner() != 0x41 {
			return Err(DecodeError::InvalidMessage(typ));
		}

		let version = Version::decode(r).await?;
		let mut params = Params::decode(r).await?;

		let role = params
			.get::<Role>(VarInt::from_u32(0))
			.await?
			.ok_or(DecodeError::MissingParameter)?;

		// Make sure the PATH parameter isn't used
		if params.has(VarInt::from_u32(1)) {
			return Err(DecodeError::InvalidParameter);
		}

		let extensions = Extensions::load(&mut params).await?;

		Ok(Self {
			version,
			role,
			extensions,
			params,
		})
	}

	/// Encode the server setup.
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		VarInt::from_u32(0x41).encode(w).await?;
		self.version.encode(w).await?;

		let mut params = self.params.clone();
		params.set(VarInt::from_u32(0), self.role).await?;
		self.extensions.store(&mut params).await?;
		params.encode(w).await?;

		Ok(())
	}
}
155
moq-transport/src/setup/version.rs
Normal file
@@ -0,0 +1,155 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};

use crate::coding::{AsyncRead, AsyncWrite};

use std::ops::Deref;

/// A version number negotiated during the setup.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Version(pub VarInt);

impl Version {
	/// https://www.ietf.org/archive/id/draft-ietf-moq-transport-00.html
	pub const DRAFT_00: Version = Version(VarInt::from_u32(0xff000000));

	/// https://www.ietf.org/archive/id/draft-ietf-moq-transport-01.html
	pub const DRAFT_01: Version = Version(VarInt::from_u32(0xff000001));

	/// Fork of draft-ietf-moq-transport-00.
	///
	/// Rough list of differences:
	///
	/// # Messages
	/// - Messages are sent over a control stream or a data stream.
	/// - Data streams: each unidirectional stream contains a single OBJECT message.
	/// - Control stream: a (client-initiated) bidirectional stream containing SETUP and then all other messages.
	/// - Messages do not contain a length; unknown messages are fatal.
	///
	/// # SETUP
	/// - SETUP is split into SETUP_CLIENT and SETUP_SERVER with separate IDs.
	/// - SETUP uses version `0xff00` for draft-00.
	/// - SETUP no longer contains optional parameters; all are encoded in order and possibly zero.
	/// - SETUP `role` indicates the role of the sender, not the role of the server.
	/// - SETUP `path` field removed; use WebTransport for path.
	///
	/// # SUBSCRIBE
	/// - SUBSCRIBE `full_name` is split into separate `namespace` and `name` fields.
	/// - SUBSCRIBE no longer contains optional parameters; all are encoded in order and possibly zero.
	/// - SUBSCRIBE no longer contains the `auth` parameter; use WebTransport for auth.
	/// - SUBSCRIBE no longer contains the `group` parameter; concept no longer exists.
	/// - SUBSCRIBE contains the `id` instead of SUBSCRIBE_OK.
	/// - SUBSCRIBE_OK and SUBSCRIBE_ERROR reference the subscription `id` instead of the track `full_name`.
	/// - SUBSCRIBE_ERROR was renamed to SUBSCRIBE_RESET, sent by the publisher to terminate a SUBSCRIBE.
	/// - SUBSCRIBE_STOP was added, sent by the subscriber to terminate a SUBSCRIBE.
	/// - SUBSCRIBE_OK no longer has `expires`.
	///
	/// # ANNOUNCE
	/// - ANNOUNCE no longer contains optional parameters; all are encoded in order and possibly zero.
	/// - ANNOUNCE no longer contains the `auth` field; use WebTransport for auth.
	/// - ANNOUNCE_ERROR was renamed to ANNOUNCE_RESET, sent by the publisher to terminate an ANNOUNCE.
	/// - ANNOUNCE_STOP was added, sent by the subscriber to terminate an ANNOUNCE.
	///
	/// # OBJECT
	/// - OBJECT uses a dedicated QUIC stream.
	/// - OBJECT has no size and continues until stream FIN.
	/// - OBJECT `priority` is an i32 instead of a varint (for practical reasons).
	/// - OBJECT `expires` was added, a varint in seconds.
	/// - OBJECT `group` was removed.
	///
	/// # GROUP
	/// - GROUP concept was removed, replaced with OBJECT as a QUIC stream.
	pub const KIXEL_00: Version = Version(VarInt::from_u32(0xbad00));

	/// Fork of draft-ietf-moq-transport-01.
	///
	/// Most of the KIXEL_00 changes made it into the draft, or were reverted.
	/// This was only used for a short time until extensions were created.
	///
	/// - SUBSCRIBE contains a separate track namespace and track name field (accidental revert). [#277](https://github.com/moq-wg/moq-transport/pull/277)
	/// - SUBSCRIBE contains the `track_id` instead of SUBSCRIBE_OK. [#145](https://github.com/moq-wg/moq-transport/issues/145)
	/// - SUBSCRIBE_* reference the `track_id` instead of the `track_full_name`. [#145](https://github.com/moq-wg/moq-transport/issues/145)
	/// - OBJECT `priority` is still a VarInt, but the max value is a u32 (implementation reasons).
	/// - OBJECT messages within the same `group` MUST be on the same QUIC stream.
	pub const KIXEL_01: Version = Version(VarInt::from_u32(0xbad01));
}

impl From<VarInt> for Version {
	fn from(v: VarInt) -> Self {
		Self(v)
	}
}

impl From<Version> for VarInt {
	fn from(v: Version) -> Self {
		v.0
	}
}

impl Version {
	/// Decode the version number.
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let v = VarInt::decode(r).await?;
		Ok(Self(v))
	}

	/// Encode the version number.
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		self.0.encode(w).await?;
		Ok(())
	}
}

/// A list of versions in arbitrary order.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Versions(Vec<Version>);

#[async_trait::async_trait]
impl Decode for Versions {
	/// Decode the version list.
	async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let count = VarInt::decode(r).await?.into_inner();
		let mut vs = Vec::new();

		for _ in 0..count {
			let v = Version::decode(r).await?;
			vs.push(v);
		}

		Ok(Self(vs))
	}
}

#[async_trait::async_trait]
impl Encode for Versions {
	/// Encode the version list.
	async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		let size: VarInt = self.0.len().try_into()?;
		size.encode(w).await?;

		for v in &self.0 {
			v.encode(w).await?;
		}

		Ok(())
	}
}

impl Deref for Versions {
	type Target = Vec<Version>;

	fn deref(&self) -> &Self::Target {
		&self.0
	}
}

impl From<Vec<Version>> for Versions {
	fn from(vs: Vec<Version>) -> Self {
		Self(vs)
	}
}

impl<const N: usize> From<[Version; N]> for Versions {
	fn from(vs: [Version; N]) -> Self {
		Self(vs.to_vec())
	}
}
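The version list on the wire is just a varint count followed by one varint per version. A hedged round-trip sketch, not part of the file, assuming the crate's stream traits accept `tokio::io::DuplexStream`:

#[cfg(test)]
mod version_sketch {
	use super::{Version, Versions};
	use crate::coding::{Decode, Encode};

	#[tokio::test]
	async fn versions_roundtrip() {
		let (mut tx, mut rx) = tokio::io::duplex(64);

		// A count of 2, then each version encoded as a varint.
		let sent: Versions = [Version::DRAFT_00, Version::KIXEL_01].into();
		sent.encode(&mut tx).await.expect("encode");

		let received = Versions::decode(&mut rx).await.expect("decode");
		assert_eq!(*received, vec![Version::DRAFT_00, Version::KIXEL_01]);
	}
}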