feat: Add streaming support (#125)

This adds:
- [x] Keyboard and mouse handling on the frontend
- [x] Video and audio streaming from the backend to the frontend
- [x] Input server that works with WebSockets

Update - 17/11
- [ ] Master docker container to run this
- [ ] Steam runtime
- [ ] Entrypoint.sh

---------

Co-authored-by: Kristian Ollikainen <14197772+DatCaptainHorse@users.noreply.github.com>
Co-authored-by: Kristian Ollikainen <DatCaptainHorse@users.noreply.github.com>
This commit is contained in:
Wanjohi
2024-12-08 14:54:56 +03:00
committed by GitHub
parent 5eb21eeadb
commit 379db1c87b
137 changed files with 12737 additions and 5234 deletions

View File

@@ -1,34 +1,57 @@
/// <reference types="vite/client" />
import * as Message from "./worker/message"
import { Ring, RingShared } from "../common/ring"
import type * as Catalog from "../karp/catalog"
import type { Frame } from "../karp/frame"
import type { Component } from "./timeline"
// This is a non-standard way of importing worklet/workers.
// Unfortunately, it's the only option because of a Vite bug: https://github.com/vitejs/vite/issues/11823
import workletURL from "./worklet/index.ts?worker&url"
// NOTE: This must be on the main thread
export class Audio {
context: AudioContext
worklet: Promise<AudioWorkletNode>
export class Renderer {
#context: AudioContext
#worklet: Promise<AudioWorkletNode>
constructor(config: Message.ConfigAudio) {
this.context = new AudioContext({
#ring: Ring
#ringShared: RingShared
#timeline: Component
#track: Catalog.Audio
#decoder!: AudioDecoder
#stream: TransformStream<Frame, AudioData>
constructor(track: Catalog.Audio, timeline: Component) {
this.#track = track
this.#context = new AudioContext({
latencyHint: "interactive",
sampleRate: config.sampleRate,
sampleRate: track.sample_rate,
})
this.worklet = this.load(config)
this.#worklet = this.load(track)
this.#timeline = timeline
this.#ringShared = new RingShared(2, track.sample_rate / 10) // 100ms
this.#ring = new Ring(this.#ringShared)
this.#stream = new TransformStream({
start: this.#start.bind(this),
transform: this.#transform.bind(this),
})
this.#run().catch((err) => console.error("failed to run audio renderer: ", err))
}
private async load(config: Message.ConfigAudio): Promise<AudioWorkletNode> {
private async load(catalog: Catalog.Audio): Promise<AudioWorkletNode> {
// Load the worklet source code.
await this.context.audioWorklet.addModule(workletURL)
await this.#context.audioWorklet.addModule(workletURL)
const volume = this.context.createGain()
const volume = this.#context.createGain()
volume.gain.value = 2.0
// Create the worklet
const worklet = new AudioWorkletNode(this.context, "renderer")
const worklet = new AudioWorkletNode(this.#context, "renderer")
worklet.port.addEventListener("message", this.on.bind(this))
worklet.onprocessorerror = (e: Event) => {
@@ -37,7 +60,13 @@ export class Audio {
// Connect the worklet to the volume node and then to the speakers
worklet.connect(volume)
volume.connect(this.context.destination)
volume.connect(this.#context.destination)
const config = {
sampleRate: catalog.sample_rate,
channelCount: catalog.channel_count,
ring: this.#ringShared,
}
worklet.port.postMessage({ config })
@@ -47,4 +76,58 @@ export class Audio {
// Handles messages posted back from the audio worklet.
// Currently a stub: the worklet does not send anything we act on yet.
private on(_event: MessageEvent) {
// TODO
}
// Resume audio output. Browsers start an AudioContext in the "suspended"
// state until a user gesture, so this must be called from a UI event.
// Failure is logged rather than thrown: playback is best-effort.
play() {
	const onResumeError = (err: unknown) => console.warn("failed to resume audio context: ", err)
	this.#context.resume().catch(onResumeError)
}
// Tear down the AudioContext, releasing the output device.
// Errors are only logged; there is nothing useful to do on failure.
close() {
	const onCloseError = (err: unknown) => console.warn("failed to close audio context: ", err)
	this.#context.close().catch(onCloseError)
}
// TransformStream "start" hook: build the WebCodecs decoder, wiring its
// output straight into the stream controller so decoded PCM flows downstream.
#start(controller: TransformStreamDefaultController) {
	const forward = (decoded: AudioData) => controller.enqueue(decoded)

	this.#decoder = new AudioDecoder({
		output: forward,
		error: console.warn,
	})

	// We only support OPUS right now which doesn't need a description.
	this.#decoder.configure({
		codec: this.#track.codec,
		sampleRate: this.#track.sample_rate,
		numberOfChannels: this.#track.channel_count,
	})
}
// TransformStream "transform" hook: repackage a container frame as a
// WebCodecs chunk and hand it to the decoder. Decoded output arrives
// asynchronously via the callback installed in #start.
#transform(frame: Frame) {
	const { type, timestamp, data } = frame
	this.#decoder.decode(new EncodedAudioChunk({ type, timestamp, data }))
}
// Main render loop: pull timeline frames through the decode pipeline and
// copy the resulting samples into the shared ring buffer that the
// AudioWorklet reads from. Runs until the upstream frame source ends.
async #run() {
	const frames = this.#timeline.frames.pipeThrough(this.#stream).getReader()

	while (true) {
		const next = await frames.read()
		if (next.done) break
		const frame = next.value

		// Write audio samples to the ring buffer, dropping when there's no space.
		const written = this.#ring.write(frame)
		if (written < frame.numberOfFrames) {
			// Deliberately silent: warning on every dropped sample is too noisy.
			/*
			console.warn(
				`dropped ${frame.numberOfFrames - written} audio samples`,
			);
			*/
		}
	}
}
}