Mirror of https://github.com/nestriness/nestri.git (synced 2025-12-12 08:45:38 +02:00)
✨ feat: Add streaming support (#125)
This adds:

- [x] Keyboard and mouse handling on the frontend
- [x] Video and audio streaming from the backend to the frontend
- [x] Input server that works with WebSockets

Update (17/11):

- [ ] Master Docker container to run this
- [ ] Steam runtime
- [ ] Entrypoint.sh

---------

Co-authored-by: Kristian Ollikainen <14197772+DatCaptainHorse@users.noreply.github.com>
Co-authored-by: Kristian Ollikainen <DatCaptainHorse@users.noreply.github.com>
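For context, a rough sketch of how the playback API introduced in this diff might be wired up on the frontend. The Player construction, unmute(), close(), and closed() calls mirror the new packages/moq/playback/player.ts below; the import specifiers, the broadcast path value, and the way the transfork Connection is obtained are assumptions for illustration only, not part of this commit.

// Hypothetical usage of the Player added in this commit (sketch only).
// Assumed: "moq/playback" and "moq/transfork" resolve to the packages in this repo,
// and a Connection has already been established elsewhere.
import { Player } from "moq/playback"
import type { Connection } from "moq/transfork/connection"

async function watch(connection: Connection, canvas: HTMLCanvasElement) {
	// PlayerConfig from the new player.ts: a connection, a broadcast path, and a canvas.
	const player = new Player({ connection, path: ["example", "broadcast"], canvas })

	// Audio starts muted; unmute() resumes the AudioContext, so call it from a user gesture.
	canvas.addEventListener("click", () => player.unmute())

	// closed() resolves when the announcement loop or the connection finishes.
	await player.closed()
	player.close()
}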
@@ -1,34 +1,57 @@
/// <reference types="vite/client" />

import * as Message from "./worker/message"
import { Ring, RingShared } from "../common/ring"
import type * as Catalog from "../karp/catalog"
import type { Frame } from "../karp/frame"
import type { Component } from "./timeline"

// This is a non-standard way of importing worklet/workers.
// Unfortunately, it's the only option because of a Vite bug: https://github.com/vitejs/vite/issues/11823
import workletURL from "./worklet/index.ts?worker&url"

// NOTE: This must be on the main thread
export class Audio {
context: AudioContext
worklet: Promise<AudioWorkletNode>
export class Renderer {
#context: AudioContext
#worklet: Promise<AudioWorkletNode>

constructor(config: Message.ConfigAudio) {
this.context = new AudioContext({
#ring: Ring
#ringShared: RingShared

#timeline: Component
#track: Catalog.Audio

#decoder!: AudioDecoder
#stream: TransformStream<Frame, AudioData>

constructor(track: Catalog.Audio, timeline: Component) {
this.#track = track
this.#context = new AudioContext({
latencyHint: "interactive",
sampleRate: config.sampleRate,
sampleRate: track.sample_rate,
})

this.worklet = this.load(config)
this.#worklet = this.load(track)

this.#timeline = timeline
this.#ringShared = new RingShared(2, track.sample_rate / 10) // 100ms
this.#ring = new Ring(this.#ringShared)

this.#stream = new TransformStream({
start: this.#start.bind(this),
transform: this.#transform.bind(this),
})

this.#run().catch((err) => console.error("failed to run audio renderer: ", err))
}

private async load(config: Message.ConfigAudio): Promise<AudioWorkletNode> {
private async load(catalog: Catalog.Audio): Promise<AudioWorkletNode> {
// Load the worklet source code.
await this.context.audioWorklet.addModule(workletURL)
await this.#context.audioWorklet.addModule(workletURL)

const volume = this.context.createGain()
const volume = this.#context.createGain()
volume.gain.value = 2.0

// Create the worklet
const worklet = new AudioWorkletNode(this.context, "renderer")
const worklet = new AudioWorkletNode(this.#context, "renderer")

worklet.port.addEventListener("message", this.on.bind(this))
worklet.onprocessorerror = (e: Event) => {
@@ -37,7 +60,13 @@ export class Audio {

// Connect the worklet to the volume node and then to the speakers
worklet.connect(volume)
volume.connect(this.context.destination)
volume.connect(this.#context.destination)

const config = {
sampleRate: catalog.sample_rate,
channelCount: catalog.channel_count,
ring: this.#ringShared,
}

worklet.port.postMessage({ config })

@@ -47,4 +76,58 @@ export class Audio {
private on(_event: MessageEvent) {
// TODO
}

play() {
this.#context.resume().catch((err) => console.warn("failed to resume audio context: ", err))
}

close() {
this.#context.close().catch((err) => console.warn("failed to close audio context: ", err))
}

#start(controller: TransformStreamDefaultController) {
this.#decoder = new AudioDecoder({
output: (frame: AudioData) => {
controller.enqueue(frame)
},
error: console.warn,
})

// We only support OPUS right now which doesn't need a description.
this.#decoder.configure({
codec: this.#track.codec,
sampleRate: this.#track.sample_rate,
numberOfChannels: this.#track.channel_count,
})
}

#transform(frame: Frame) {
const chunk = new EncodedAudioChunk({
type: frame.type,
timestamp: frame.timestamp,
data: frame.data,
})

this.#decoder.decode(chunk)
}

async #run() {
const reader = this.#timeline.frames.pipeThrough(this.#stream).getReader()

for (;;) {
const { value: frame, done } = await reader.read()
if (done) break

// Write audio samples to the ring buffer, dropping when there's no space.
const written = this.#ring.write(frame)

if (written < frame.numberOfFrames) {
/*
console.warn(
`dropped ${frame.numberOfFrames - written} audio samples`,
);
*/
}
}
}
}

@@ -1,114 +0,0 @@
/// <reference types="vite/client" />

import * as Message from "./worker/message"
import { Audio } from "./audio"

import MediaWorker from "./worker?worker"
import { RingShared } from "../common/ring"
import { Root, isAudioTrack } from "../media/catalog"
import { GroupHeader } from "../transport/objects"

export interface PlayerConfig {
canvas: OffscreenCanvas
catalog: Root
}

// This is a non-standard way of importing worklet/workers.
// Unfortunately, it's the only option because of a Vite bug: https://github.com/vitejs/vite/issues/11823

// Responsible for sending messages to the worker and worklet.
export default class Backend {
// General worker
#worker: Worker

// The audio context, which must be created on the main thread.
#audio?: Audio

constructor(config: PlayerConfig) {
// TODO does this block the main thread? If so, make this async
// @ts-expect-error: The Vite typing is wrong https://github.com/vitejs/vite/blob/22bd67d70a1390daae19ca33d7de162140d533d6/packages/vite/client.d.ts#L182
this.#worker = new MediaWorker({ format: "es" })
this.#worker.addEventListener("message", this.on.bind(this))

let sampleRate: number | undefined
let channels: number | undefined

for (const track of config.catalog.tracks) {
if (isAudioTrack(track)) {
if (sampleRate && track.selectionParams.samplerate !== sampleRate) {
throw new Error(`TODO multiple audio tracks with different sample rates`)
}

sampleRate = track.selectionParams.samplerate

// TODO properly handle weird channel configs
channels = Math.max(+track.selectionParams.channelConfig, channels ?? 0)
}
}

const msg: Message.Config = {}

// Only configure audio is we have an audio track
if (sampleRate && channels) {
msg.audio = {
channels: channels,
sampleRate: sampleRate,
ring: new RingShared(2, sampleRate / 10), // 100ms
}

this.#audio = new Audio(msg.audio)
}

// TODO only send the canvas if we have a video track
msg.video = {
canvas: config.canvas,
}

this.send({ config: msg }, msg.video.canvas)
}

async play() {
await this.#audio?.context.resume()
}

init(init: Init) {
this.send({ init })
}

segment(segment: Segment) {
this.send({ segment }, segment.stream)
}

async close() {
this.#worker.terminate()
await this.#audio?.context.close()
}

// Enforce we're sending valid types to the worker
private send(msg: Message.ToWorker, ...transfer: Transferable[]) {
//console.log("sent message from main to worker", msg)
this.#worker.postMessage(msg, transfer)
}

private on(e: MessageEvent) {
const msg = e.data as Message.FromWorker

// Don't print the verbose timeline message.
if (!msg.timeline) {
//console.log("received message from worker to main", msg)
}
}
}

export interface Init {
name: string // name of the init track
data: Uint8Array
}

export interface Segment {
init: string // name of the init track
kind: "audio" | "video"
header: GroupHeader
buffer: Uint8Array
stream: ReadableStream<Uint8Array>
}

packages/moq/playback/broadcast.ts (normal file, 148 lines)
@@ -0,0 +1,148 @@
import type * as Catalog from "../karp/catalog"
import type { Connection } from "../transfork/connection"

import { Track } from "../transfork"

import { Frame } from "../karp/frame"
import type { GroupReader } from "../transfork/model"
import * as Audio from "./audio"
import { Timeline } from "./timeline"
import * as Video from "./video"

// This class must be created on the main thread due to AudioContext.
export class Broadcast {
#connection: Connection
#catalog: Catalog.Broadcast

// Running is a promise that resolves when the player is closed.
// #close is called with no error, while #abort is called with an error.
#running: Promise<void>

// Timeline receives samples, buffering them and choosing the timestamp to render.
#timeline = new Timeline()

#audio?: Audio.Renderer
#video?: Video.Renderer

constructor(connection: Connection, catalog: Catalog.Broadcast, canvas: HTMLCanvasElement) {
this.#connection = connection
this.#catalog = catalog

const running = []

// Only configure audio if we have an audio track
const audio = (catalog.audio || []).at(0)
if (audio) {
this.#audio = new Audio.Renderer(audio, this.#timeline.audio)
running.push(this.#runAudio(audio))
}

const video = (catalog.video || []).at(0)
if (video) {
this.#video = new Video.Renderer(video, canvas, this.#timeline.video)
running.push(this.#runVideo(video))
}

// Async work
this.#running = Promise.race([...running])
}

async #runAudio(audio: Catalog.Audio) {
const track = new Track(this.#catalog.path.concat(audio.track.name), audio.track.priority)
const sub = await this.#connection.subscribe(track)

try {
for (;;) {
const group = await Promise.race([sub.nextGroup(), this.#running])
if (!group) break

this.#runAudioGroup(audio, group)
.catch(() => {})
.finally(() => group.close())
}
} finally {
sub.close()
}
}

async #runVideo(video: Catalog.Video) {
const track = new Track(this.#catalog.path.concat(video.track.name), video.track.priority)
const sub = await this.#connection.subscribe(track)

try {
for (;;) {
const group = await Promise.race([sub.nextGroup(), this.#running])
if (!group) break

this.#runVideoGroup(video, group)
.catch(() => {})
.finally(() => group.close())
}
} finally {
sub.close()
}
}

async #runAudioGroup(audio: Catalog.Audio, group: GroupReader) {
const timeline = this.#timeline.audio

// Create a queue that will contain each frame
const queue = new TransformStream<Frame>({})
const segment = queue.writable.getWriter()

// Add the segment to the timeline
const segments = timeline.segments.getWriter()
await segments.write({
sequence: group.id,
frames: queue.readable,
})
segments.releaseLock()

// Read each chunk, decoding the MP4 frames and adding them to the queue.
for (;;) {
const frame = await Frame.decode(group)
if (!frame) break

await segment.write(frame)
}

// We're done.
await segment.close()
}

async #runVideoGroup(video: Catalog.Video, group: GroupReader) {
const timeline = this.#timeline.video

// Create a queue that will contain each MP4 frame.
const queue = new TransformStream<Frame>({})
const segment = queue.writable.getWriter()

// Add the segment to the timeline
const segments = timeline.segments.getWriter()
await segments.write({
sequence: group.id,
frames: queue.readable,
})
segments.releaseLock()

for (;;) {
const frame = await Frame.decode(group)
if (!frame) break

await segment.write(frame)
}

// We're done.
await segment.close()
}

unmute() {
console.debug("unmuting audio")
this.#audio?.play()
}

close() {
this.#audio?.close()
this.#video?.close()
}
}

@@ -1,190 +1,2 @@
import * as Message from "./worker/message"

import { Connection } from "../transport/connection"
import * as Catalog from "../media/catalog"
import { asError } from "../common/error"

import Backend from "./backend"

import { Client } from "../transport/client"
import { GroupReader } from "../transport/objects"

export type Range = Message.Range
export type Timeline = Message.Timeline

export interface PlayerConfig {
url: string
namespace: string
fingerprint?: string // URL to fetch TLS certificate fingerprint
canvas: HTMLCanvasElement
}

// This class must be created on the main thread due to AudioContext.
export class Player {
#backend: Backend

// A periodically updated timeline
//#timeline = new Watch<Timeline | undefined>(undefined)

#connection: Connection
#catalog: Catalog.Root

// Running is a promise that resolves when the player is closed.
// #close is called with no error, while #abort is called with an error.
#running: Promise<void>
#close!: () => void
#abort!: (err: Error) => void

private constructor(connection: Connection, catalog: Catalog.Root, backend: Backend) {
this.#connection = connection
this.#catalog = catalog
this.#backend = backend

const abort = new Promise<void>((resolve, reject) => {
this.#close = resolve
this.#abort = reject
})

// Async work
this.#running = Promise.race([this.#run(), abort]).catch(this.#close)
}

static async create(config: PlayerConfig): Promise<Player> {
const client = new Client({ url: config.url, fingerprint: config.fingerprint, role: "subscriber" })
const connection = await client.connect()

const catalog = await Catalog.fetch(connection, config.namespace)
console.log("catalog", catalog)

const canvas = config.canvas.transferControlToOffscreen()
const backend = new Backend({ canvas, catalog })

return new Player(connection, catalog, backend)
}

async #run() {
const inits = new Set<[string, string]>()
const tracks = new Array<Catalog.Track>()

for (const track of this.#catalog.tracks) {
if (!track.namespace) throw new Error("track has no namespace")
if (track.initTrack) inits.add([track.namespace, track.initTrack])
tracks.push(track)
}

// Call #runInit on each unique init track
// TODO do this in parallel with #runTrack to remove a round trip
await Promise.all(Array.from(inits).map((init) => this.#runInit(...init)))

// Call #runTrack on each track
await Promise.all(tracks.map((track) => this.#runTrack(track)))
}

async #runInit(namespace: string, name: string) {
const sub = await this.#connection.subscribe(namespace, name)
try {
const init = await Promise.race([sub.data(), this.#running])
if (!init) throw new Error("no init data")

// We don't care what type of reader we get, we just want the payload.
const chunk = await init.read()
if (!chunk) throw new Error("no init chunk")
if (!(chunk.payload instanceof Uint8Array)) throw new Error("invalid init chunk")

this.#backend.init({ data: chunk.payload, name })
} finally {
await sub.close()
}
}

async #runTrack(track: Catalog.Track) {
if (!track.namespace) throw new Error("track has no namespace")
const sub = await this.#connection.subscribe(track.namespace, track.name)

try {
for (;;) {
const segment = await Promise.race([sub.data(), this.#running])
if (!segment) break

if (!(segment instanceof GroupReader)) {
throw new Error(`expected group reader for segment: ${track.name}`)
}

const kind = Catalog.isVideoTrack(track) ? "video" : Catalog.isAudioTrack(track) ? "audio" : "unknown"
if (kind == "unknown") {
throw new Error(`unknown track kind: ${track.name}`)
}

if (!track.initTrack) {
throw new Error(`no init track for segment: ${track.name}`)
}

const [buffer, stream] = segment.stream.release()

this.#backend.segment({
init: track.initTrack,
kind,
header: segment.header,
buffer,
stream,
})
}
} catch (error) {
console.error("Error in #runTrack:", error)
} finally {
await sub.close()
}
}

getCatalog() {
return this.#catalog
}

#onMessage(msg: Message.FromWorker) {
if (msg.timeline) {
//this.#timeline.update(msg.timeline)
}
}

async close(err?: Error) {
if (err) this.#abort(err)
else this.#close()

if (this.#connection) this.#connection.close()
if (this.#backend) await this.#backend.close()
}

async closed(): Promise<Error | undefined> {
try {
await this.#running
} catch (e) {
return asError(e)
}
}

/*
play() {
this.#backend.play({ minBuffer: 0.5 }) // TODO configurable
}

seek(timestamp: number) {
this.#backend.seek({ timestamp })
}
*/

async play() {
await this.#backend.play()
}

/*
async *timeline() {
for (;;) {
const [timeline, next] = this.#timeline.value()
if (timeline) yield timeline
if (!next) break

await next
}
}
*/
}
export { Player } from "./player"
export type { PlayerConfig } from "./player"

packages/moq/playback/player.ts (normal file, 63 lines)
@@ -0,0 +1,63 @@
import * as Catalog from "../karp/catalog"
import type { Connection } from "../transfork/connection"
import { Broadcast } from "./broadcast"

export interface PlayerConfig {
connection: Connection
path: string[]
canvas: HTMLCanvasElement
}

// This class must be created on the main thread due to AudioContext.
export class Player {
#config: PlayerConfig
#running: Promise<void>
#active?: Broadcast

constructor(config: PlayerConfig) {
this.#config = config
this.#running = this.#run()
}

async #run() {
const announced = await this.#config.connection.announced(this.#config.path)

let activeId = -1

for (;;) {
const announce = await announced.next()
if (!announce) break

if (announce.path.length === this.#config.path.length) {
throw new Error("expected resumable broadcast")
}

const path = announce.path.slice(0, this.#config.path.length + 1)

const id = Number.parseInt(path[path.length - 1])
if (id <= activeId) continue

const catalog = await Catalog.fetch(this.#config.connection, path)

this.#active?.close()
this.#active = new Broadcast(this.#config.connection, catalog, this.#config.canvas)
activeId = id
}

this.#active?.close()
}

close() {
this.#config.connection.close()
this.#active?.close()
this.#active = undefined
}

async closed() {
await Promise.any([this.#running, this.#config.connection.closed()])
}

unmute() {
this.#active?.unmute()
}
}

@@ -1,5 +1,4 @@
import type { Frame } from "../../media/mp4"
export type { Frame }
import type { Frame } from "../karp/frame"

export interface Range {
start: number
@@ -48,7 +47,7 @@ export class Component {
// Get the next segment to render.
const segments = this.#segments.readable.getReader()

let res
let res: ReadableStreamReadResult<Segment> | ReadableStreamReadResult<Frame>
if (this.#current) {
// Get the next frame to render.
const frames = this.#current.frames.getReader()
@@ -85,17 +84,17 @@ export class Component {
// Our segment is older than the current, abandon it.
await value.frames.cancel("skipping segment; too old")
continue
} else {
// Our segment is newer than the current, cancel the old one.
await this.#current.frames.cancel("skipping segment; too slow")
}

// Our segment is newer than the current, cancel the old one.
await this.#current.frames.cancel("skipping segment; too slow")
}

this.#current = value
}
}

async #cancel(reason: any) {
async #cancel(reason: Error) {
if (this.#current) {
await this.#current.frames.cancel(reason)
}
@@ -111,8 +110,6 @@ export class Component {
}

// Return if a type is a segment or frame
// eslint-disable-next-line @typescript-eslint/no-redundant-type-constituents
function isSegment(value: Segment | Frame): value is Segment {
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
return (value as Segment).frames !== undefined
}
@@ -10,10 +10,10 @@
"path": "../common"
},
{
"path": "../transport"
"path": "../transfork"
},
{
"path": "../media"
"path": "../karp"
}
],
"paths": {

@@ -1,16 +1,18 @@
import { Frame, Component } from "./timeline"
import * as MP4 from "../../media/mp4"
import * as Message from "./message"
import type * as Catalog from "../karp/catalog"
import type { Frame } from "../karp/frame"
import type { Component } from "./timeline"

export class Renderer {
#canvas: OffscreenCanvas
#track: Catalog.Video
#canvas: HTMLCanvasElement
#timeline: Component

#decoder!: VideoDecoder
#queue: TransformStream<Frame, VideoFrame>

constructor(config: Message.ConfigVideo, timeline: Component) {
this.#canvas = config.canvas
constructor(track: Catalog.Video, canvas: HTMLCanvasElement, timeline: Component) {
this.#track = track
this.#canvas = canvas
this.#timeline = timeline

this.#queue = new TransformStream({
@@ -18,7 +20,11 @@ export class Renderer {
transform: this.#transform.bind(this),
})

this.#run().catch(console.error)
this.#run().catch((err) => console.error("failed to run video renderer: ", err))
}

close() {
// TODO
}

async #run() {
@@ -47,36 +53,21 @@ export class Renderer {
},
error: console.error,
})

this.#decoder.configure({
codec: this.#track.codec,
codedHeight: this.#track.resolution.height,
codedWidth: this.#track.resolution.width,
description: this.#track.description,
optimizeForLatency: true,
})
}

#transform(frame: Frame) {
// Configure the decoder with the first frame
if (this.#decoder.state !== "configured") {
const { sample, track } = frame

const desc = sample.description
const box = desc.avcC ?? desc.hvcC ?? desc.vpcC ?? desc.av1C
if (!box) throw new Error(`unsupported codec: ${track.codec}`)

const buffer = new MP4.Stream(undefined, 0, MP4.Stream.BIG_ENDIAN)
box.write(buffer)
const description = new Uint8Array(buffer.buffer, 8) // Remove the box header.

if (!MP4.isVideoTrack(track)) throw new Error("expected video track")

this.#decoder.configure({
codec: track.codec,
codedHeight: track.video.height,
codedWidth: track.video.width,
description,
// optimizeForLatency: true
})
}

const chunk = new EncodedVideoChunk({
type: frame.sample.is_sync ? "key" : "delta",
data: frame.sample.data,
timestamp: frame.sample.dts / frame.track.timescale,
type: frame.type,
data: frame.data,
timestamp: frame.timestamp,
})

this.#decoder.decode(chunk)

@@ -1,73 +0,0 @@
import * as Message from "./message"
import { Ring } from "../../common/ring"
import { Component, Frame } from "./timeline"
import * as MP4 from "../../media/mp4"

// This is run in a worker.
export class Renderer {
#ring: Ring
#timeline: Component

#decoder!: AudioDecoder
#stream: TransformStream<Frame, AudioData>

constructor(config: Message.ConfigAudio, timeline: Component) {
this.#timeline = timeline
this.#ring = new Ring(config.ring)

this.#stream = new TransformStream({
start: this.#start.bind(this),
transform: this.#transform.bind(this),
})

this.#run().catch(console.error)
}

#start(controller: TransformStreamDefaultController) {
this.#decoder = new AudioDecoder({
output: (frame: AudioData) => {
controller.enqueue(frame)
},
error: console.warn,
})
}

#transform(frame: Frame) {
if (this.#decoder.state !== "configured") {
const track = frame.track
if (!MP4.isAudioTrack(track)) throw new Error("expected audio track")

// We only support OPUS right now which doesn't need a description.
this.#decoder.configure({
codec: track.codec,
sampleRate: track.audio.sample_rate,
numberOfChannels: track.audio.channel_count,
})
}

const chunk = new EncodedAudioChunk({
type: frame.sample.is_sync ? "key" : "delta",
timestamp: frame.sample.dts / frame.track.timescale,
duration: frame.sample.duration,
data: frame.sample.data,
})

this.#decoder.decode(chunk)
}

async #run() {
const reader = this.#timeline.frames.pipeThrough(this.#stream).getReader()

for (;;) {
const { value: frame, done } = await reader.read()
if (done) break

// Write audio samples to the ring buffer, dropping when there's no space.
const written = this.#ring.write(frame)

if (written < frame.numberOfFrames) {
console.warn(`droppped ${frame.numberOfFrames - written} audio samples`)
}
}
}
}

@@ -1,119 +0,0 @@
import { Timeline } from "./timeline"

import * as Audio from "./audio"
import * as Video from "./video"

import * as MP4 from "../../media/mp4"
import * as Message from "./message"
import { asError } from "../../common/error"
import { Deferred } from "../../common/async"
import { GroupReader, Reader } from "../../transport/objects"

class Worker {
// Timeline receives samples, buffering them and choosing the timestamp to render.
#timeline = new Timeline()

// A map of init tracks.
#inits = new Map<string, Deferred<Uint8Array>>()

// Renderer requests samples, rendering video frames and emitting audio frames.
#audio?: Audio.Renderer
#video?: Video.Renderer

on(e: MessageEvent) {
const msg = e.data as Message.ToWorker

if (msg.config) {
this.#onConfig(msg.config)
} else if (msg.init) {
// TODO buffer the init segmnet so we don't hold the stream open.
this.#onInit(msg.init)
} else if (msg.segment) {
this.#onSegment(msg.segment).catch(console.warn)
} else {
throw new Error(`unknown message: + ${JSON.stringify(msg)}`)
}
}

#onConfig(msg: Message.Config) {
if (msg.audio) {
this.#audio = new Audio.Renderer(msg.audio, this.#timeline.audio)
}

if (msg.video) {
this.#video = new Video.Renderer(msg.video, this.#timeline.video)
}
}

#onInit(msg: Message.Init) {
let init = this.#inits.get(msg.name)
if (!init) {
init = new Deferred()
this.#inits.set(msg.name, init)
}

init.resolve(msg.data)
}

async #onSegment(msg: Message.Segment) {
let init = this.#inits.get(msg.init)
if (!init) {
init = new Deferred()
this.#inits.set(msg.init, init)
}

// Create a new stream that we will use to decode.
const container = new MP4.Parser(await init.promise)

const timeline = msg.kind === "audio" ? this.#timeline.audio : this.#timeline.video
const reader = new GroupReader(msg.header, new Reader(msg.buffer, msg.stream))

// Create a queue that will contain each MP4 frame.
const queue = new TransformStream<MP4.Frame>({})
const segment = queue.writable.getWriter()

// Add the segment to the timeline
const segments = timeline.segments.getWriter()
await segments.write({
sequence: msg.header.group,
frames: queue.readable,
})
segments.releaseLock()

// Read each chunk, decoding the MP4 frames and adding them to the queue.
for (;;) {
const chunk = await reader.read()
if (!chunk) {
break
}

if (!(chunk.payload instanceof Uint8Array)) {
throw new Error(`invalid payload: ${chunk.payload}`)
}

const frames = container.decode(chunk.payload)
for (const frame of frames) {
await segment.write(frame)
}
}

// We done.
await segment.close()
}
}

// Pass all events to the worker
const worker = new Worker()
self.addEventListener("message", (msg) => {
try {
worker.on(msg)
} catch (e) {
const err = asError(e)
console.warn("worker error:", err)
}
})

// Validates this is an expected message
function _send(msg: Message.FromWorker) {
postMessage(msg)
}

@@ -1,98 +0,0 @@
import { GroupHeader } from "../../transport/objects"
import { RingShared } from "../../common/ring"

export interface Config {
audio?: ConfigAudio
video?: ConfigVideo
}

export interface ConfigAudio {
channels: number
sampleRate: number

ring: RingShared
}

export interface ConfigVideo {
canvas: OffscreenCanvas
}

export interface Init {
name: string // name of the init object
data: Uint8Array
}

export interface Segment {
init: string // name of the init object
kind: "audio" | "video"
header: GroupHeader
buffer: Uint8Array
stream: ReadableStream<Uint8Array>
}

/*
export interface Play {
// Start playback once the minimum buffer size has been reached.
minBuffer: number
}

export interface Seek {
timestamp: number
}
*/

// Sent periodically with the current timeline info.
export interface Timeline {
// The current playback position
timestamp?: number

// Audio specific information
audio: TimelineAudio

// Video specific information
video: TimelineVideo
}

export interface TimelineAudio {
buffer: Range[]
}

export interface TimelineVideo {
buffer: Range[]
}

export interface Range {
start: number
end: number
}

// Used to validate that only the correct messages can be sent.

// Any top level messages that can be sent to the worker.
export interface ToWorker {
// Sent to configure on startup.
config?: Config

// Sent on each init/data stream
init?: Init
segment?: Segment

/*
// Sent to control playback
play?: Play
seek?: Seek
*/
}

// Any top-level messages that can be sent from the worker.
export interface FromWorker {
// Sent back to the main thread regularly to update the UI
timeline?: Timeline
}

/*
interface ToWorklet {
config?: Audio.Config
}

*/

@@ -1,6 +1,6 @@
// TODO add support for @/ to avoid relative imports
import { Ring } from "../../common/ring"
import * as Message from "./message"
import type * as Message from "./message"

class Renderer extends AudioWorkletProcessor {
ring?: Ring
@@ -26,17 +26,17 @@ class Renderer extends AudioWorkletProcessor {
}

// Inputs and outputs in groups of 128 samples.
process(inputs: Float32Array[][], outputs: Float32Array[][], _parameters: Record<string, Float32Array>): boolean {
process(_inputs: Float32Array[][], outputs: Float32Array[][], _parameters: Record<string, Float32Array>): boolean {
if (!this.ring) {
// Paused
return true
}

if (inputs.length != 1 && outputs.length != 1) {
if (outputs.length !== 1) {
throw new Error("only a single track is supported")
}

if (this.ring.size() == this.ring.capacity) {
if (this.ring.size() === this.ring.capacity) {
// This is a hack to clear any latency in the ring buffer.
// The proper solution is to play back slightly faster?
console.warn("resyncing ring buffer")

@@ -1,4 +1,4 @@
import { RingShared } from "../../common/ring"
import type { RingShared } from "../../common/ring"

export interface From {
config?: Config
@@ -7,6 +7,5 @@ export interface From {
export interface Config {
channels: number
sampleRate: number

ring: RingShared
}