feat: Host a relay on Hetzner (#114)

We are hosting a [MoQ](https://quic.video) relay on a remote (bare-metal) server on Hetzner.

With a lot of help from @victorpahuus.
Author: Wanjohi
Date: 2024-09-26 21:34:42 +03:00
Committed by: GitHub
Parent: c4a6895726
Commit: bae089e223
74 changed files with 7107 additions and 96 deletions


@@ -0,0 +1,50 @@
/// <reference types="vite/client" />
import * as Message from "./worker/message"
// This is a non-standard way of importing worklet/workers.
// Unfortunately, it's the only option because of a Vite bug: https://github.com/vitejs/vite/issues/11823
import workletURL from "./worklet/index.ts?worker&url"
// NOTE: This must be on the main thread
export class Audio {
context: AudioContext
worklet: Promise<AudioWorkletNode>
constructor(config: Message.ConfigAudio) {
this.context = new AudioContext({
latencyHint: "interactive",
sampleRate: config.sampleRate,
})
this.worklet = this.load(config)
}
private async load(config: Message.ConfigAudio): Promise<AudioWorkletNode> {
// Load the worklet source code.
await this.context.audioWorklet.addModule(workletURL)
const volume = this.context.createGain()
volume.gain.value = 2.0
// Create the worklet
const worklet = new AudioWorkletNode(this.context, "renderer")
worklet.port.addEventListener("message", this.on.bind(this))
// start() is required when using addEventListener (instead of onmessage),
// otherwise messages from the worklet are never delivered.
worklet.port.start()
worklet.onprocessorerror = (e: Event) => {
console.error("Audio worklet error:", e)
}
// Connect the worklet to the volume node and then to the speakers
worklet.connect(volume)
volume.connect(this.context.destination)
worklet.port.postMessage({ config })
return worklet
}
private on(_event: MessageEvent) {
// TODO
}
}


@@ -0,0 +1,114 @@
/// <reference types="vite/client" />
import * as Message from "./worker/message"
import { Audio } from "./audio"
import MediaWorker from "./worker?worker"
import { RingShared } from "../common/ring"
import { Root, isAudioTrack } from "../media/catalog"
import { GroupHeader } from "../transport/objects"
export interface PlayerConfig {
canvas: OffscreenCanvas
catalog: Root
}
// This is a non-standard way of importing worklet/workers.
// Unfortunately, it's the only option because of a Vite bug: https://github.com/vitejs/vite/issues/11823
// Responsible for sending messages to the worker and worklet.
export default class Backend {
// General worker
#worker: Worker
// The audio context, which must be created on the main thread.
#audio?: Audio
constructor(config: PlayerConfig) {
// TODO does this block the main thread? If so, make this async
// @ts-expect-error: The Vite typing is wrong https://github.com/vitejs/vite/blob/22bd67d70a1390daae19ca33d7de162140d533d6/packages/vite/client.d.ts#L182
this.#worker = new MediaWorker({ format: "es" })
this.#worker.addEventListener("message", this.on.bind(this))
let sampleRate: number | undefined
let channels: number | undefined
for (const track of config.catalog.tracks) {
if (isAudioTrack(track)) {
if (sampleRate && track.selectionParams.samplerate !== sampleRate) {
throw new Error(`TODO multiple audio tracks with different sample rates`)
}
sampleRate = track.selectionParams.samplerate
// TODO properly handle weird channel configs
channels = Math.max(+track.selectionParams.channelConfig, channels ?? 0)
}
}
const msg: Message.Config = {}
// Only configure audio if we have an audio track
if (sampleRate && channels) {
msg.audio = {
channels: channels,
sampleRate: sampleRate,
ring: new RingShared(2, sampleRate / 10), // 100ms
}
this.#audio = new Audio(msg.audio)
}
// TODO only send the canvas if we have a video track
msg.video = {
canvas: config.canvas,
}
this.send({ config: msg }, msg.video.canvas)
}
async play() {
await this.#audio?.context.resume()
}
init(init: Init) {
this.send({ init })
}
segment(segment: Segment) {
this.send({ segment }, segment.stream)
}
async close() {
this.#worker.terminate()
await this.#audio?.context.close()
}
// Enforce we're sending valid types to the worker
private send(msg: Message.ToWorker, ...transfer: Transferable[]) {
//console.log("sent message from main to worker", msg)
this.#worker.postMessage(msg, transfer)
}
private on(e: MessageEvent) {
const msg = e.data as Message.FromWorker
// Don't print the verbose timeline message.
if (!msg.timeline) {
//console.log("received message from worker to main", msg)
}
}
}
export interface Init {
name: string // name of the init track
data: Uint8Array
}
export interface Segment {
init: string // name of the init track
kind: "audio" | "video"
header: GroupHeader
buffer: Uint8Array
stream: ReadableStream<Uint8Array>
}


@@ -0,0 +1,190 @@
import * as Message from "./worker/message"
import { Connection } from "../transport/connection"
import * as Catalog from "../media/catalog"
import { asError } from "../common/error"
import Backend from "./backend"
import { Client } from "../transport/client"
import { GroupReader } from "../transport/objects"
export type Range = Message.Range
export type Timeline = Message.Timeline
export interface PlayerConfig {
url: string
namespace: string
fingerprint?: string // URL to fetch TLS certificate fingerprint
canvas: HTMLCanvasElement
}
// This class must be created on the main thread due to AudioContext.
export class Player {
#backend: Backend
// A periodically updated timeline
//#timeline = new Watch<Timeline | undefined>(undefined)
#connection: Connection
#catalog: Catalog.Root
// Running is a promise that resolves when the player is closed.
// #close is called with no error, while #abort is called with an error.
#running: Promise<void>
#close!: () => void
#abort!: (err: Error) => void
private constructor(connection: Connection, catalog: Catalog.Root, backend: Backend) {
this.#connection = connection
this.#catalog = catalog
this.#backend = backend
const abort = new Promise<void>((resolve, reject) => {
this.#close = resolve
this.#abort = reject
})
// Async work
this.#running = Promise.race([this.#run(), abort]).catch(this.#close)
}
static async create(config: PlayerConfig): Promise<Player> {
const client = new Client({ url: config.url, fingerprint: config.fingerprint, role: "subscriber" })
const connection = await client.connect()
const catalog = await Catalog.fetch(connection, config.namespace)
console.log("catalog", catalog)
const canvas = config.canvas.transferControlToOffscreen()
const backend = new Backend({ canvas, catalog })
return new Player(connection, catalog, backend)
}
async #run() {
const inits = new Set<[string, string]>()
const tracks = new Array<Catalog.Track>()
for (const track of this.#catalog.tracks) {
if (!track.namespace) throw new Error("track has no namespace")
if (track.initTrack) inits.add([track.namespace, track.initTrack])
tracks.push(track)
}
// Call #runInit on each unique init track
// TODO do this in parallel with #runTrack to remove a round trip
await Promise.all(Array.from(inits).map((init) => this.#runInit(...init)))
// Call #runTrack on each track
await Promise.all(tracks.map((track) => this.#runTrack(track)))
}
async #runInit(namespace: string, name: string) {
const sub = await this.#connection.subscribe(namespace, name)
try {
const init = await Promise.race([sub.data(), this.#running])
if (!init) throw new Error("no init data")
// We don't care what type of reader we get, we just want the payload.
const chunk = await init.read()
if (!chunk) throw new Error("no init chunk")
if (!(chunk.payload instanceof Uint8Array)) throw new Error("invalid init chunk")
this.#backend.init({ data: chunk.payload, name })
} finally {
await sub.close()
}
}
async #runTrack(track: Catalog.Track) {
if (!track.namespace) throw new Error("track has no namespace")
const sub = await this.#connection.subscribe(track.namespace, track.name)
try {
for (;;) {
const segment = await Promise.race([sub.data(), this.#running])
if (!segment) break
if (!(segment instanceof GroupReader)) {
throw new Error(`expected group reader for segment: ${track.name}`)
}
const kind = Catalog.isVideoTrack(track) ? "video" : Catalog.isAudioTrack(track) ? "audio" : "unknown"
if (kind == "unknown") {
throw new Error(`unknown track kind: ${track.name}`)
}
if (!track.initTrack) {
throw new Error(`no init track for segment: ${track.name}`)
}
const [buffer, stream] = segment.stream.release()
this.#backend.segment({
init: track.initTrack,
kind,
header: segment.header,
buffer,
stream,
})
}
} catch (error) {
console.error("Error in #runTrack:", error)
} finally {
await sub.close()
}
}
getCatalog() {
return this.#catalog
}
#onMessage(msg: Message.FromWorker) {
if (msg.timeline) {
//this.#timeline.update(msg.timeline)
}
}
async close(err?: Error) {
if (err) this.#abort(err)
else this.#close()
if (this.#connection) this.#connection.close()
if (this.#backend) await this.#backend.close()
}
async closed(): Promise<Error | undefined> {
try {
await this.#running
} catch (e) {
return asError(e)
}
}
/*
play() {
this.#backend.play({ minBuffer: 0.5 }) // TODO configurable
}
seek(timestamp: number) {
this.#backend.seek({ timestamp })
}
*/
async play() {
await this.#backend.play()
}
/*
async *timeline() {
for (;;) {
const [timeline, next] = this.#timeline.value()
if (timeline) yield timeline
if (!next) break
await next
}
}
*/
}
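For reference, a minimal usage sketch (not part of this commit) of driving the Player class above. The import path, relay URL, and namespace are hypothetical placeholders; the fingerprint URL is only needed for self-signed certificates.
import { Player } from "./player" // assumed module path

const canvas = document.querySelector("canvas")
if (!canvas) throw new Error("no canvas element found")

// Connect to the relay and fetch the catalog for the given namespace.
const player = await Player.create({
	url: "https://relay.example.com:4443", // hypothetical relay URL
	namespace: "demo", // hypothetical broadcast namespace
	fingerprint: "https://relay.example.com:4443/fingerprint", // optional, for self-signed certs
	canvas,
})

// play() resumes the AudioContext, so call it in response to a user gesture.
await player.play()

// Wait until the player stops, getting back an Error if it failed.
const err = await player.closed()
if (err) console.error("playback failed:", err)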


@@ -0,0 +1,22 @@
{
"extends": "../tsconfig.json",
"include": ["."],
"exclude": ["./worklet"],
"compilerOptions": {
"types": ["dom-mediacapture-transform", "dom-webcodecs"]
},
"references": [
{
"path": "../common"
},
{
"path": "../transport"
},
{
"path": "../media"
}
],
"paths": {
"@/*": ["*"]
}
}


@@ -0,0 +1,73 @@
import * as Message from "./message"
import { Ring } from "../../common/ring"
import { Component, Frame } from "./timeline"
import * as MP4 from "../../media/mp4"
// This is run in a worker.
export class Renderer {
#ring: Ring
#timeline: Component
#decoder!: AudioDecoder
#stream: TransformStream<Frame, AudioData>
constructor(config: Message.ConfigAudio, timeline: Component) {
this.#timeline = timeline
this.#ring = new Ring(config.ring)
this.#stream = new TransformStream({
start: this.#start.bind(this),
transform: this.#transform.bind(this),
})
this.#run().catch(console.error)
}
#start(controller: TransformStreamDefaultController) {
this.#decoder = new AudioDecoder({
output: (frame: AudioData) => {
controller.enqueue(frame)
},
error: console.warn,
})
}
#transform(frame: Frame) {
if (this.#decoder.state !== "configured") {
const track = frame.track
if (!MP4.isAudioTrack(track)) throw new Error("expected audio track")
// We only support Opus right now, which doesn't need a description.
this.#decoder.configure({
codec: track.codec,
sampleRate: track.audio.sample_rate,
numberOfChannels: track.audio.channel_count,
})
}
const chunk = new EncodedAudioChunk({
type: frame.sample.is_sync ? "key" : "delta",
timestamp: frame.sample.dts / frame.track.timescale,
duration: frame.sample.duration,
data: frame.sample.data,
})
this.#decoder.decode(chunk)
}
async #run() {
const reader = this.#timeline.frames.pipeThrough(this.#stream).getReader()
for (;;) {
const { value: frame, done } = await reader.read()
if (done) break
// Write audio samples to the ring buffer, dropping when there's no space.
const written = this.#ring.write(frame)
if (written < frame.numberOfFrames) {
console.warn(`dropped ${frame.numberOfFrames - written} audio samples`)
}
}
}
}


@@ -0,0 +1,119 @@
import { Timeline } from "./timeline"
import * as Audio from "./audio"
import * as Video from "./video"
import * as MP4 from "../../media/mp4"
import * as Message from "./message"
import { asError } from "../../common/error"
import { Deferred } from "../../common/async"
import { GroupReader, Reader } from "../../transport/objects"
class Worker {
// Timeline receives samples, buffering them and choosing the timestamp to render.
#timeline = new Timeline()
// A map of init tracks.
#inits = new Map<string, Deferred<Uint8Array>>()
// Renderer requests samples, rendering video frames and emitting audio frames.
#audio?: Audio.Renderer
#video?: Video.Renderer
on(e: MessageEvent) {
const msg = e.data as Message.ToWorker
if (msg.config) {
this.#onConfig(msg.config)
} else if (msg.init) {
// TODO buffer the init segment so we don't hold the stream open.
this.#onInit(msg.init)
} else if (msg.segment) {
this.#onSegment(msg.segment).catch(console.warn)
} else {
throw new Error(`unknown message: ${JSON.stringify(msg)}`)
}
}
#onConfig(msg: Message.Config) {
if (msg.audio) {
this.#audio = new Audio.Renderer(msg.audio, this.#timeline.audio)
}
if (msg.video) {
this.#video = new Video.Renderer(msg.video, this.#timeline.video)
}
}
#onInit(msg: Message.Init) {
let init = this.#inits.get(msg.name)
if (!init) {
init = new Deferred()
this.#inits.set(msg.name, init)
}
init.resolve(msg.data)
}
async #onSegment(msg: Message.Segment) {
let init = this.#inits.get(msg.init)
if (!init) {
init = new Deferred()
this.#inits.set(msg.init, init)
}
// Create a new stream that we will use to decode.
const container = new MP4.Parser(await init.promise)
const timeline = msg.kind === "audio" ? this.#timeline.audio : this.#timeline.video
const reader = new GroupReader(msg.header, new Reader(msg.buffer, msg.stream))
// Create a queue that will contain each MP4 frame.
const queue = new TransformStream<MP4.Frame>({})
const segment = queue.writable.getWriter()
// Add the segment to the timeline
const segments = timeline.segments.getWriter()
await segments.write({
sequence: msg.header.group,
frames: queue.readable,
})
segments.releaseLock()
// Read each chunk, decoding the MP4 frames and adding them to the queue.
for (;;) {
const chunk = await reader.read()
if (!chunk) {
break
}
if (!(chunk.payload instanceof Uint8Array)) {
throw new Error(`invalid payload: ${chunk.payload}`)
}
const frames = container.decode(chunk.payload)
for (const frame of frames) {
await segment.write(frame)
}
}
// We're done.
await segment.close()
}
}
// Pass all events to the worker
const worker = new Worker()
self.addEventListener("message", (msg) => {
try {
worker.on(msg)
} catch (e) {
const err = asError(e)
console.warn("worker error:", err)
}
})
// Validates this is an expected message
function _send(msg: Message.FromWorker) {
postMessage(msg)
}


@@ -0,0 +1,98 @@
import { GroupHeader } from "../../transport/objects"
import { RingShared } from "../../common/ring"
export interface Config {
audio?: ConfigAudio
video?: ConfigVideo
}
export interface ConfigAudio {
channels: number
sampleRate: number
ring: RingShared
}
export interface ConfigVideo {
canvas: OffscreenCanvas
}
export interface Init {
name: string // name of the init object
data: Uint8Array
}
export interface Segment {
init: string // name of the init object
kind: "audio" | "video"
header: GroupHeader
buffer: Uint8Array
stream: ReadableStream<Uint8Array>
}
/*
export interface Play {
// Start playback once the minimum buffer size has been reached.
minBuffer: number
}
export interface Seek {
timestamp: number
}
*/
// Sent periodically with the current timeline info.
export interface Timeline {
// The current playback position
timestamp?: number
// Audio specific information
audio: TimelineAudio
// Video specific information
video: TimelineVideo
}
export interface TimelineAudio {
buffer: Range[]
}
export interface TimelineVideo {
buffer: Range[]
}
export interface Range {
start: number
end: number
}
// Used to validate that only the correct messages can be sent.
// Any top level messages that can be sent to the worker.
export interface ToWorker {
// Sent to configure on startup.
config?: Config
// Sent on each init/data stream
init?: Init
segment?: Segment
/*
// Sent to control playback
play?: Play
seek?: Seek
*/
}
// Any top-level messages that can be sent from the worker.
export interface FromWorker {
// Sent back to the main thread regularly to update the UI
timeline?: Timeline
}
/*
interface ToWorklet {
config?: Audio.Config
}
*/


@@ -0,0 +1,118 @@
import type { Frame } from "../../media/mp4"
export type { Frame }
export interface Range {
start: number
end: number
}
export class Timeline {
// Maintain audio and video separately
audio: Component
video: Component
// Construct a timeline
constructor() {
this.audio = new Component()
this.video = new Component()
}
}
interface Segment {
sequence: number
frames: ReadableStream<Frame>
}
export class Component {
#current?: Segment
frames: ReadableStream<Frame>
#segments: TransformStream<Segment, Segment>
constructor() {
this.frames = new ReadableStream({
pull: this.#pull.bind(this),
cancel: this.#cancel.bind(this),
})
// This is a hack to have an async channel with 100 items.
this.#segments = new TransformStream({}, { highWaterMark: 100 })
}
get segments() {
return this.#segments.writable
}
async #pull(controller: ReadableStreamDefaultController<Frame>) {
for (;;) {
// Get the next segment to render.
const segments = this.#segments.readable.getReader()
let res
if (this.#current) {
// Get the next frame to render.
const frames = this.#current.frames.getReader()
// Wait for either the frames or segments to be ready.
// NOTE: This assumes that the first promise gets priority.
res = await Promise.race([frames.read(), segments.read()])
frames.releaseLock()
} else {
res = await segments.read()
}
segments.releaseLock()
const { value, done } = res
if (done) {
// We assume the current segment has been closed
// TODO support the segments stream closing
this.#current = undefined
continue
}
if (!isSegment(value)) {
// Return so the reader can decide when to get the next frame.
controller.enqueue(value)
return
}
// We didn't get any frames, and instead got a new segment.
if (this.#current) {
if (value.sequence < this.#current.sequence) {
// Our segment is older than the current, abandon it.
await value.frames.cancel("skipping segment; too old")
continue
} else {
// Our segment is newer than the current, cancel the old one.
await this.#current.frames.cancel("skipping segment; too slow")
}
}
this.#current = value
}
}
async #cancel(reason: any) {
if (this.#current) {
await this.#current.frames.cancel(reason)
}
const segments = this.#segments.readable.getReader()
for (;;) {
const { value: segment, done } = await segments.read()
if (done) break
await segment.frames.cancel(reason)
}
}
}
// Return whether a value is a segment (as opposed to a frame)
// eslint-disable-next-line @typescript-eslint/no-redundant-type-constituents
function isSegment(value: Segment | Frame): value is Segment {
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
return (value as Segment).frames !== undefined
}
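A brief sketch (not part of this commit) of how a Component is fed and drained: producers write Segments into component.segments, while a single consumer pulls from component.frames and #pull above silently switches to newer segments. The helper names are illustrative.
const component = new Component()

// Producer: each incoming group becomes a Segment with its own frame stream.
async function pushSegment(sequence: number, frames: ReadableStream<Frame>) {
	const writer = component.segments.getWriter()
	await writer.write({ sequence, frames })
	writer.releaseLock()
}

// Consumer: frames arrive in render order; stale segments are skipped or cancelled.
async function consume() {
	const reader = component.frames.getReader()
	for (;;) {
		const { value: frame, done } = await reader.read()
		if (done) break
		// hand the frame to a decoder or renderer here
	}
}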


@@ -0,0 +1,84 @@
import { Frame, Component } from "./timeline"
import * as MP4 from "../../media/mp4"
import * as Message from "./message"
export class Renderer {
#canvas: OffscreenCanvas
#timeline: Component
#decoder!: VideoDecoder
#queue: TransformStream<Frame, VideoFrame>
constructor(config: Message.ConfigVideo, timeline: Component) {
this.#canvas = config.canvas
this.#timeline = timeline
this.#queue = new TransformStream({
start: this.#start.bind(this),
transform: this.#transform.bind(this),
})
this.#run().catch(console.error)
}
async #run() {
const reader = this.#timeline.frames.pipeThrough(this.#queue).getReader()
for (;;) {
const { value: frame, done } = await reader.read()
if (done) break
self.requestAnimationFrame(() => {
this.#canvas.width = frame.displayWidth
this.#canvas.height = frame.displayHeight
const ctx = this.#canvas.getContext("2d")
if (!ctx) throw new Error("failed to get canvas context")
ctx.drawImage(frame, 0, 0, frame.displayWidth, frame.displayHeight) // TODO respect aspect ratio
frame.close()
})
}
}
#start(controller: TransformStreamDefaultController<VideoFrame>) {
this.#decoder = new VideoDecoder({
output: (frame: VideoFrame) => {
controller.enqueue(frame)
},
error: console.error,
})
}
#transform(frame: Frame) {
// Configure the decoder with the first frame
if (this.#decoder.state !== "configured") {
const { sample, track } = frame
const desc = sample.description
const box = desc.avcC ?? desc.hvcC ?? desc.vpcC ?? desc.av1C
if (!box) throw new Error(`unsupported codec: ${track.codec}`)
const buffer = new MP4.Stream(undefined, 0, MP4.Stream.BIG_ENDIAN)
box.write(buffer)
const description = new Uint8Array(buffer.buffer, 8) // Remove the box header.
if (!MP4.isVideoTrack(track)) throw new Error("expected video track")
this.#decoder.configure({
codec: track.codec,
codedHeight: track.video.height,
codedWidth: track.video.width,
description,
// optimizeForLatency: true
})
}
const chunk = new EncodedVideoChunk({
type: frame.sample.is_sync ? "key" : "delta",
data: frame.sample.data,
timestamp: frame.sample.dts / frame.track.timescale,
})
this.#decoder.decode(chunk)
}
}


@@ -0,0 +1,58 @@
// TODO add support for @/ to avoid relative imports
import { Ring } from "../../common/ring"
import * as Message from "./message"
class Renderer extends AudioWorkletProcessor {
ring?: Ring
base: number
constructor() {
// The super constructor call is required.
super()
this.base = 0
this.port.onmessage = this.onMessage.bind(this)
}
onMessage(e: MessageEvent) {
const msg = e.data as Message.From
if (msg.config) {
this.onConfig(msg.config)
}
}
onConfig(config: Message.Config) {
this.ring = new Ring(config.ring)
}
// Inputs and outputs in groups of 128 samples.
process(inputs: Float32Array[][], outputs: Float32Array[][], _parameters: Record<string, Float32Array>): boolean {
if (!this.ring) {
// Paused
return true
}
if (inputs.length != 1 && outputs.length != 1) {
throw new Error("only a single track is supported")
}
if (this.ring.size() == this.ring.capacity) {
// This is a hack to clear any latency in the ring buffer.
// The proper solution is to play back slightly faster?
console.warn("resyncing ring buffer")
this.ring.clear()
return true
}
const output = outputs[0]
const size = this.ring.read(output)
if (size < output.length) {
// TODO trigger rebuffering event
}
return true
}
}
registerProcessor("renderer", Renderer)
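To summarize the audio path (a sketch, not part of this commit): the main thread allocates a RingShared and posts it to both the decoder worker and this worklet, and each side wraps the same shared state in a Ring. This assumes the Ring/RingShared API used above; the numbers are illustrative.
// Main thread (the Audio class above): allocate ~100ms of stereo at 48kHz and
// post it to both sides (the shared state is posted, not transferred).
const shared = new RingShared(2, 4800)
// worker.postMessage({ config: { audio: { channels: 2, sampleRate: 48000, ring: shared } } })
// worklet.port.postMessage({ config: { channels: 2, sampleRate: 48000, ring: shared } })

// Decoder worker (the audio Renderer above): write decoded AudioData,
// dropping whatever doesn't fit.
const producer = new Ring(shared)
// producer.write(audioData) -> number of samples actually written

// This worklet: read up to 128 samples per process() call into the output channels.
const consumer = new Ring(shared)
// consumer.read(output) -> number of samples actually read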


@@ -0,0 +1,12 @@
import { RingShared } from "../../common/ring"
export interface From {
config?: Config
}
export interface Config {
channels: number
sampleRate: number
ring: RingShared
}


@@ -0,0 +1,14 @@
{
"extends": "../../tsconfig.json",
"include": ["."],
"exclude": ["./index"],
"compilerOptions": {
"lib": ["es2022"],
"types": ["audioworklet"]
},
"references": [
{
"path": "../../common"
}
]
}