Mirror of https://github.com/nestriness/nestri.git (synced 2025-12-11 00:05:36 +02:00)
feat: Migrate from WebSocket to libp2p for peer-to-peer connectivity (#286)
## Description

Whew, some stuff is still not re-implemented, but it's working! Rabbit's gonna explode with the amount of changes I reckon 😅

## Summary by CodeRabbit

- **New Features**
  - Introduced a peer-to-peer relay system using libp2p with enhanced stream forwarding, room state synchronization, and mDNS peer discovery.
  - Added decentralized room and participant management, metrics publishing, and safe, size-limited, concurrent message streaming with robust framing and callback dispatching.
  - Implemented asynchronous, callback-driven message handling over custom libp2p streams replacing WebSocket signaling.
- **Improvements**
  - Migrated signaling and stream protocols from WebSocket to libp2p, improving reliability and scalability.
  - Simplified configuration and environment variables, removing deprecated flags and adding persistent data support.
  - Enhanced logging, error handling, and connection management for better observability and robustness.
  - Refined RTP header extension registration and NAT IP handling for improved WebRTC performance.
- **Bug Fixes**
  - Improved ICE candidate buffering and SDP negotiation in WebRTC connections.
  - Fixed NAT IP and UDP port range configuration issues.
- **Refactor**
  - Modularized codebase, reorganized relay and server logic, and removed deprecated WebSocket-based components.
  - Streamlined message structures, removed obsolete enums and message types, and simplified SafeMap concurrency.
  - Replaced WebSocket signaling with libp2p stream protocols in server and relay components.
- **Chores**
  - Updated and cleaned dependencies across Go, Rust, and JavaScript packages.
  - Added `.gitignore` for persistent data directory in relay package.

---------

Co-authored-by: DatCaptainHorse <DatCaptainHorse@users.noreply.github.com>
Co-authored-by: Philipp Neumann <3daquawolf@gmail.com>
committed by GitHub
parent e67a8d2b32
commit 6e82eff9e2
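Distilled from the changes below, the client's new connection path is: configure a libp2p node, dial the relay's multiaddr, and open the custom stream-request protocol. A minimal sketch using the modules this PR adds to package.json (assumes top-level await; the multiaddr and protocol ID are the defaults hard-coded in the diff):

```ts
import { createLibp2p } from "libp2p";
import { webSockets } from "@libp2p/websockets";
import { noise } from "@chainsafe/libp2p-noise";
import { yamux } from "@chainsafe/libp2p-yamux";
import { identify } from "@libp2p/identify";
import { ping } from "@libp2p/ping";
import { multiaddr } from "@multiformats/multiaddr";

// Sketch only: dial the relay over WebSockets and open the custom
// stream-request protocol, as WebRTCStream._setup() does in the diff below.
const node = await createLibp2p({
  transports: [webSockets()],
  connectionEncrypters: [noise()],
  streamMuxers: [yamux()],
  services: { identify: identify(), ping: ping() },
});

const conn = await node.dial(
  multiaddr("/dnsaddr/relay.dathorse.com/p2p/12D3KooWPK4v5wKYNYx9oXWjqLM8Xix6nm13o91j1Feqq98fLBsw"),
);
const stream = await conn.newStream("/nestri-relay/stream-request/1.0.0");
```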
@@ -72,7 +72,7 @@ export default component$(() => {
    });

    const lockPlay = $(async () => {
        if (!canvas.value || !playState.hasStream) return;
        if (!canvas.value || !playState.hasStream || playState.nestriLock) return;

        try {
            await canvas.value.requestPointerLock();
@@ -156,18 +156,22 @@ export default component$(() => {
        });
    });


    // eslint-disable-next-line qwik/no-use-visible-task
    useVisibleTask$(({ track }) => {
        track(() => canvas.value);
        if (!canvas.value) return; // Ensure canvas is available
        // Get query parameter "peerURL" from the URL
        let peerURL = new URLSearchParams(window.location.search).get("peerURL");
        if (!peerURL || peerURL.length <= 0) {
            peerURL = "/dnsaddr/relay.dathorse.com/p2p/12D3KooWPK4v5wKYNYx9oXWjqLM8Xix6nm13o91j1Feqq98fLBsw";
        }

        setupPointerLockListener();
        try {
            if (!playState.video) {
                playState.video = document.createElement("video") as HTMLVideoElement
                playState.video = document.createElement("video") as HTMLVideoElement;
                playState.video.style.visibility = "hidden";
                playState.webrtc = noSerialize(new WebRTCStream("https://relay.dathorse.com", id, async (mediaStream) => {
                playState.webrtc = noSerialize(new WebRTCStream(peerURL, id, async (mediaStream) => {
                    if (playState.video && mediaStream && playState.video.srcObject === null) {
                        console.log("Setting mediastream");
                        playState.video.srcObject = mediaStream;

@@ -10,21 +10,19 @@ WORKDIR /relay
# TODO: Switch running layer to just alpine (doesn't need golang dev stack)

# ENV flags
ENV REGEN_IDENTITY=false
ENV VERBOSE=false
ENV DEBUG=false
ENV ENDPOINT_PORT=8088
ENV MESH_PORT=8089
ENV WEBRTC_UDP_START=10000
ENV WEBRTC_UDP_END=20000
ENV WEBRTC_UDP_START=0
ENV WEBRTC_UDP_END=0
ENV STUN_SERVER="stun.l.google.com:19302"
ENV WEBRTC_UDP_MUX=8088
ENV WEBRTC_NAT_IPS=""
ENV AUTO_ADD_LOCAL_IP=true
ENV TLS_CERT=""
ENV TLS_KEY=""
ENV PERSIST_DIR="./persist-data"

EXPOSE $ENDPOINT_PORT
EXPOSE $MESH_PORT
EXPOSE $WEBRTC_UDP_START-$WEBRTC_UDP_END/udp
EXPOSE $WEBRTC_UDP_MUX/udp

@@ -11,6 +11,18 @@
    "@bufbuild/protoc-gen-es": "^2.2.3"
  },
  "dependencies": {
    "@bufbuild/protobuf": "^2.2.3"
    "@bufbuild/protobuf": "^2.2.3",
    "@chainsafe/libp2p-noise": "^16.1.3",
    "@chainsafe/libp2p-yamux": "^7.0.1",
    "@libp2p/identify": "^3.0.32",
    "@libp2p/interface": "^2.10.2",
    "@libp2p/ping": "^2.0.32",
    "@libp2p/websockets": "^9.2.13",
    "@multiformats/multiaddr": "^12.4.0",
    "it-length-prefixed": "^10.0.1",
    "it-pipe": "^3.0.1",
    "libp2p": "^2.8.8",
    "uint8arraylist": "^2.4.8",
    "uint8arrays": "^5.1.0"
  }
}

@@ -1,37 +1,305 @@
import {LatencyTracker} from "./latency";
import { LatencyTracker } from "./latency";
import { Uint8ArrayList } from "uint8arraylist";
import { allocUnsafe } from "uint8arrays/alloc";
import { pipe } from "it-pipe";
import { decode, encode } from "it-length-prefixed";
import { Stream } from "@libp2p/interface";

export interface MessageBase {
  payload_type: string;
  latency?: LatencyTracker;
}

export interface MessageRaw extends MessageBase {
  data: any;
}

export function NewMessageRaw(type: string, data: any): Uint8Array {
  const msg = {
    payload_type: type,
    data: data,
  };
  return new TextEncoder().encode(JSON.stringify(msg));
}

export interface MessageICE extends MessageBase {
  payload_type: "ice";
  candidate: RTCIceCandidateInit;
}

export function NewMessageICE(
  type: string,
  candidate: RTCIceCandidateInit,
): Uint8Array {
  const msg = {
    payload_type: type,
    candidate: candidate,
  };
  return new TextEncoder().encode(JSON.stringify(msg));
}

export interface MessageSDP extends MessageBase {
  payload_type: "sdp";
  sdp: RTCSessionDescriptionInit;
}

export enum JoinerType {
  JoinerNode = 0,
  JoinerClient = 1,
export function NewMessageSDP(
  type: string,
  sdp: RTCSessionDescriptionInit,
): Uint8Array {
  const msg = {
    payload_type: type,
    sdp: sdp,
  };
  return new TextEncoder().encode(JSON.stringify(msg));
}

export interface MessageJoin extends MessageBase {
  payload_type: "join";
  joiner_type: JoinerType;
const MAX_SIZE = 1024 * 1024; // 1MB
const MAX_QUEUE_SIZE = 1000; // Maximum number of messages in the queue

// Custom 4-byte length encoder
export const length4ByteEncoder = (length: number) => {
  const buf = allocUnsafe(4);

  // Write the length as a 32-bit unsigned integer (4 bytes)
  buf[0] = length >>> 24;
  buf[1] = (length >>> 16) & 0xff;
  buf[2] = (length >>> 8) & 0xff;
  buf[3] = length & 0xff;

  // Set the bytes property to 4
  length4ByteEncoder.bytes = 4;

  return buf;
};
length4ByteEncoder.bytes = 4;

// Custom 4-byte length decoder
export const length4ByteDecoder = (data: Uint8ArrayList) => {
  if (data.byteLength < 4) {
    // Not enough bytes to read the length
    return -1;
  }

  // Read the length from the first 4 bytes
  let length = 0;
  length =
    (data.subarray(0, 1)[0] >>> 0) * 0x1000000 +
    (data.subarray(1, 2)[0] >>> 0) * 0x10000 +
    (data.subarray(2, 3)[0] >>> 0) * 0x100 +
    (data.subarray(3, 4)[0] >>> 0);

  // Set bytes read to 4
  length4ByteDecoder.bytes = 4;

  return length;
};
length4ByteDecoder.bytes = 4;
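The two helpers above implement plain 32-bit big-endian length prefixing, so the 1 MB `MAX_SIZE` cap fits comfortably in the four prefix bytes. A quick round-trip check, as a standalone sketch rather than part of the commit:

```ts
import { Uint8ArrayList } from "uint8arraylist";

// A 500-byte message yields the prefix [0x00, 0x00, 0x01, 0xf4].
const prefix = length4ByteEncoder(500);
console.log(Array.from(prefix)); // [0, 0, 1, 244]

// Decoding the same four bytes recovers the length.
console.log(length4ByteDecoder(new Uint8ArrayList(prefix))); // 500
```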
interface PromiseMessage {
  data: Uint8Array;
  resolve: () => void;
  reject: (error: Error) => void;
}

export enum AnswerType {
  AnswerOffline = 0,
  AnswerInUse,
  AnswerOK
}
export class SafeStream {
  private stream: Stream;
  private callbacks: Map<string, ((data: any) => void)[]> = new Map();
  private isReading: boolean = false;
  private isWriting: boolean = false;
  private closed: boolean = false;
  private messageQueue: PromiseMessage[] = [];
  private writeLock = false;
  private readRetries = 0;
  private writeRetries = 0;
  private readonly MAX_RETRIES = 5;

export interface MessageAnswer extends MessageBase {
  payload_type: "answer";
  answer_type: AnswerType;
  constructor(stream: Stream) {
    this.stream = stream;
    this.startReading();
    this.startWriting();
  }

  private async startReading(): Promise<void> {
    if (this.isReading || this.closed) return;

    this.isReading = true;

    try {
      const source = this.stream.source;
      const decodedSource = decode(source, {
        maxDataLength: MAX_SIZE,
        lengthDecoder: length4ByteDecoder,
      });

      for await (const chunk of decodedSource) {
        if (this.closed) break;

        this.readRetries = 0;

        try {
          const data = chunk.slice();
          const message = JSON.parse(
            new TextDecoder().decode(data),
          ) as MessageBase;
          const msgType = message.payload_type;

          if (this.callbacks.has(msgType)) {
            const handlers = this.callbacks.get(msgType)!;
            for (const handler of handlers) {
              try {
                handler(message);
              } catch (err) {
                console.error(`Error in message handler for ${msgType}:`, err);
              }
            }
          }
        } catch (err) {
          console.error("Error processing message:", err);
        }
      }
    } catch (err) {
      console.error("Stream reading error:", err);
    } finally {
      this.isReading = false;
      this.readRetries++;

      // If not closed, try to restart reading
      if (!this.closed && this.readRetries < this.MAX_RETRIES)
        setTimeout(() => this.startReading(), 100);
      else if (this.readRetries >= this.MAX_RETRIES)
        console.error(
          "Max retries reached for reading stream, stopping attempts",
        );
    }
  }

  public registerCallback(
    msgType: string,
    callback: (data: any) => void,
  ): void {
    if (!this.callbacks.has(msgType)) {
      this.callbacks.set(msgType, []);
    }

    this.callbacks.get(msgType)!.push(callback);
  }

  public removeCallback(msgType: string, callback: (data: any) => void): void {
    if (this.callbacks.has(msgType)) {
      const callbacks = this.callbacks.get(msgType)!;
      const index = callbacks.indexOf(callback);

      if (index !== -1) {
        callbacks.splice(index, 1);
      }

      if (callbacks.length === 0) {
        this.callbacks.delete(msgType);
      }
    }
  }

  private async startWriting(): Promise<void> {
    if (this.isWriting || this.closed) return;

    this.isWriting = true;

    try {
      // Create an async generator for real-time message processing
      const messageSource = async function* (this: SafeStream) {
        while (!this.closed) {
          // Check if we have messages to send
          if (this.messageQueue.length > 0) {
            this.writeLock = true;

            try {
              const message = this.messageQueue[0];

              // Encode the message
              const encoded = encode([message.data], {
                maxDataLength: MAX_SIZE,
                lengthEncoder: length4ByteEncoder,
              });

              for await (const chunk of encoded) {
                yield chunk;
              }

              // Remove message after successful sending
              this.writeRetries = 0;
              const sentMessage = this.messageQueue.shift();
              if (sentMessage)
                sentMessage.resolve();
            } catch (err) {
              console.error("Error encoding or sending message:", err);
              const failedMessage = this.messageQueue.shift();
              if (failedMessage)
                failedMessage.reject(new Error(`Failed to send message: ${err}`));
            } finally {
              this.writeLock = false;
            }
          } else {
            // No messages to send, wait for a short period
            await new Promise((resolve) => setTimeout(resolve, 100));
          }
        }
      }.bind(this);

      await pipe(messageSource(), this.stream.sink).catch((err) => {
        console.error("Sink error:", err);
        this.isWriting = false;
        this.writeRetries++;

        // Try to restart if not closed
        if (!this.closed && this.writeRetries < this.MAX_RETRIES) {
          setTimeout(() => this.startWriting(), 1000);
        } else if (this.writeRetries >= this.MAX_RETRIES) {
          console.error("Max retries reached for writing to stream sink, stopping attempts");
        }
      });
    } catch (err) {
      console.error("Stream writing error:", err);
      this.isWriting = false;
      this.writeRetries++;

      // Try to restart if not closed
      if (!this.closed && this.writeRetries < this.MAX_RETRIES) {
        setTimeout(() => this.startWriting(), 1000);
      } else if (this.writeRetries >= this.MAX_RETRIES) {
        console.error("Max retries reached for writing stream, stopping attempts");
      }
    }
  }

  public async writeMessage(message: Uint8Array): Promise<void> {
    if (this.closed) {
      throw new Error("Cannot write to closed stream");
    }

    // Validate message size before queuing
    if (message.length > MAX_SIZE) {
      throw new Error("Message size exceeds maximum size limit");
    }

    // Check if the message queue is too large
    if (this.messageQueue.length >= MAX_QUEUE_SIZE) {
      throw new Error("Message queue is full, cannot write message");
    }

    // Create a promise to resolve when the message is sent
    return new Promise((resolve, reject) => {
      this.messageQueue.push({ data: message, resolve, reject } as PromiseMessage);
    });
  }

  public close(): void {
    this.closed = true;
    this.callbacks.clear();
    // Reject pending messages
    for (const msg of this.messageQueue)
      msg.reject(new Error("Stream closed"));

    this.messageQueue = [];
    this.readRetries = 0;
    this.writeRetries = 0;
  }
}
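A rough usage sketch for the SafeStream above (hypothetical; `stream` would come from `Connection.newStream` as in the next file, and `NewMessageRaw` is the helper defined earlier):

```ts
// Hypothetical usage of the SafeStream defined above.
const safe = new SafeStream(stream);

// Dispatch incoming frames by payload_type.
safe.registerCallback("offer", async (msg) => {
  console.log("Received SDP offer:", msg.sdp);
});

// Queue an outgoing frame; the promise resolves once the sink accepts it.
await safe.writeMessage(NewMessageRaw("request-stream-room", "my-room"));
```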
@@ -1,18 +1,27 @@
import {
  MessageBase,
  MessageICE,
  MessageJoin,
  MessageSDP,
  MessageAnswer,
  JoinerType,
  AnswerType,
  NewMessageRaw,
  NewMessageSDP,
  NewMessageICE,
  SafeStream,
} from "./messages";
import { webSockets } from "@libp2p/websockets";
import { createLibp2p, Libp2p } from "libp2p";
import { noise } from "@chainsafe/libp2p-noise";
import { yamux } from "@chainsafe/libp2p-yamux";
import { identify } from "@libp2p/identify";
import { multiaddr } from "@multiformats/multiaddr";
import { Connection } from "@libp2p/interface";
import { ping } from "@libp2p/ping";

//FIXME: Sometimes the room will wait to say offline, then appear to be online after retrying :D
// This works for me, with my trashy internet, does it work for you as well?

const NESTRI_PROTOCOL_STREAM_REQUEST = "/nestri-relay/stream-request/1.0.0";

export class WebRTCStream {
  private _ws: WebSocket | undefined = undefined;
  private _p2p: Libp2p | undefined = undefined;
  private _p2pConn: Connection | undefined = undefined;
  private _p2pSafeStream: SafeStream | undefined = undefined;
  private _pc: RTCPeerConnection | undefined = undefined;
  private _audioTrack: MediaStreamTrack | undefined = undefined;
  private _videoTrack: MediaStreamTrack | undefined = undefined;
@@ -24,7 +33,11 @@ export class WebRTCStream {
  private _isConnected: boolean = false; // Add flag to track connection state
  currentFrameRate: number = 60;

  constructor(serverURL: string, roomName: string, connectedCallback: (stream: MediaStream | null) => void) {
  constructor(
    serverURL: string,
    roomName: string,
    connectedCallback: (stream: MediaStream | null) => void,
  ) {
    if (roomName.length <= 0) {
      console.error("Room name not provided");
      return;
@@ -33,120 +46,114 @@ export class WebRTCStream {
    this._onConnected = connectedCallback;
    this._serverURL = serverURL;
    this._roomName = roomName;
    this._setup(serverURL, roomName);
    this._setup(serverURL, roomName).catch(console.error);
  }

  private _setup(serverURL: string, roomName: string) {
  private async _setup(serverURL: string, roomName: string) {
    // Don't setup new connection if already connected
    if (this._isConnected) {
      console.log("Already connected, skipping setup");
      return;
    }

    console.log("Setting up WebSocket");
    const wsURL = serverURL.replace(/^http/, "ws");
    this._ws = new WebSocket(`${wsURL}/api/ws/${roomName}`);
    this._ws.onopen = async () => {
      console.log("WebSocket opened");
      // Send join message
      const joinMessage: MessageJoin = {
        payload_type: "join",
        joiner_type: JoinerType.JoinerClient
      };
      this._ws!.send(JSON.stringify(joinMessage));
    }
    console.log("Setting up libp2p");

    let iceHolder: RTCIceCandidateInit[] = [];
    this._p2p = await createLibp2p({
      transports: [webSockets()],
      connectionEncrypters: [noise()],
      streamMuxers: [yamux()],
      connectionGater: {
        denyDialMultiaddr: () => {
          return false;
        },
      },
      services: {
        identify: identify(),
        ping: ping(),
      },
    });

    this._ws.onmessage = async (e) => {
      // allow only JSON
      if (typeof e.data === "object") return;
      if (!e.data) return;
      const message = JSON.parse(e.data) as MessageBase;
      switch (message.payload_type) {
        case "sdp":
    this._p2p.addEventListener("peer:connect", async (e) => {
      console.debug("Peer connected:", e.detail);
    });
    this._p2p.addEventListener("peer:disconnect", (e) => {
      console.debug("Peer disconnected:", e.detail);
    });

    const ma = multiaddr(serverURL);
    console.debug("Dialing peer at:", ma.toString());
    this._p2pConn = await this._p2p.dial(ma);

    if (this._p2pConn) {
      console.log("Stream is being established");
      let stream = await this._p2pConn
        .newStream(NESTRI_PROTOCOL_STREAM_REQUEST)
        .catch(console.error);
      if (stream) {
        this._p2pSafeStream = new SafeStream(stream);
        console.log("Stream opened with peer");

        let iceHolder: RTCIceCandidateInit[] = [];
        this._p2pSafeStream.registerCallback("ice-candidate", (data) => {
          if (this._pc) {
            if (this._pc.remoteDescription) {
              this._pc.addIceCandidate(data.candidate).catch((err) => {
                console.error("Error adding ICE candidate:", err);
              });
              // Add held candidates
              iceHolder.forEach((candidate) => {
                this._pc!.addIceCandidate(candidate).catch((err) => {
                  console.error("Error adding held ICE candidate:", err);
                });
              });
              iceHolder = [];
            } else {
              iceHolder.push(data.candidate);
            }
          } else {
            iceHolder.push(data.candidate);
          }
        });

        this._p2pSafeStream.registerCallback("offer", async (data) => {
          if (!this._pc) {
            // Setup peer connection now
            this._setupPeerConnection();
          }
          console.log("Received SDP: ", (message as MessageSDP).sdp);
          await this._pc!.setRemoteDescription((message as MessageSDP).sdp);
          await this._pc!.setRemoteDescription(data.sdp);
          // Create our answer
          const answer = await this._pc!.createAnswer();
          // Force stereo in Chromium browsers
          answer.sdp = this.forceOpusStereo(answer.sdp!);
          await this._pc!.setLocalDescription(answer);
          this._ws!.send(JSON.stringify({
            payload_type: "sdp",
            sdp: answer
          }));
          break;
        case "ice":
          if (!this._pc) break;
          if (this._pc.remoteDescription) {
            try {
              await this._pc.addIceCandidate((message as MessageICE).candidate);
              // Add held ICE candidates
              for (const ice of iceHolder) {
                try {
                  await this._pc.addIceCandidate(ice);
                } catch (e) {
                  console.error("Error adding held ICE candidate: ", e);
                }
              }
              iceHolder = [];
            } catch (e) {
              console.error("Error adding ICE candidate: ", e);
            }
          } else {
            iceHolder.push((message as MessageICE).candidate);
          }
          break;
        case "answer":
          switch ((message as MessageAnswer).answer_type) {
            case AnswerType.AnswerOffline:
              console.log("Room is offline");
              // Call callback with null stream
              if (this._onConnected)
                this._onConnected(null);
          // Send answer back
          const answerMsg = NewMessageSDP("answer", answer);
          await this._p2pSafeStream?.writeMessage(answerMsg);
        });

              break;
            case AnswerType.AnswerInUse:
              console.warn("Room is in use, we shouldn't even be getting this message");
              break;
            case AnswerType.AnswerOK:
              console.log("Joining Room was successful");
              break;
          }
          break;
        default:
          console.error("Unknown message type: ", message);
        this._p2pSafeStream.registerCallback("request-stream-offline", (data) => {
          console.warn("Stream is offline for room:", data.roomName);
          this._onConnected?.(null);
        });

        // Send stream request
        // marshal room name into json
        const request = NewMessageRaw(
          "request-stream-room",
          roomName,
        );
        await this._p2pSafeStream.writeMessage(request);
      }
    }

    this._ws.onclose = () => {
      console.log("WebSocket closed, reconnecting in 3 seconds");
      if (this._onConnected)
        this._onConnected(null);

      // Clear PeerConnection
      this._cleanupPeerConnection()

      this._handleConnectionFailure()
      // setTimeout(() => {
      //   this._setup(serverURL, roomName);
      // }, this._connectionTimeout);
    }

    this._ws.onerror = (e) => {
      console.error("WebSocket error: ", e);
    }
  }

  // Forces opus to stereo in Chromium browsers, because of course
  private forceOpusStereo(SDP: string): string {
    // Look for "minptime=10;useinbandfec=1" and replace with "minptime=10;useinbandfec=1;stereo=1;sprop-stereo=1;"
    return SDP.replace(/(minptime=10;useinbandfec=1)/, "$1;stereo=1;sprop-stereo=1;");
    return SDP.replace(
      /(minptime=10;useinbandfec=1)/,
      "$1;stereo=1;sprop-stereo=1;",
    );
  }

  private _setupPeerConnection() {
@@ -158,43 +165,50 @@ export class WebRTCStream {
    this._pc = new RTCPeerConnection({
      iceServers: [
        {
          urls: "stun:stun.l.google.com:19302"
        }
          urls: "stun:stun.l.google.com:19302",
        },
      ],
    });

    this._pc.ontrack = (e) => {
      console.log("Track received: ", e.track);
      if (e.track.kind === "audio")
        this._audioTrack = e.track;
      else if (e.track.kind === "video")
        this._videoTrack = e.track;
      console.debug("Track received: ", e.track);
      if (e.track.kind === "audio") this._audioTrack = e.track;
      else if (e.track.kind === "video") this._videoTrack = e.track;

      this._checkConnectionState();
    };

    this._pc.onconnectionstatechange = () => {
      console.log("Connection state changed to: ", this._pc!.connectionState);
      console.debug("Connection state changed to: ", this._pc!.connectionState);
      this._checkConnectionState();
    };

    this._pc.oniceconnectionstatechange = () => {
      console.log("ICE connection state changed to: ", this._pc!.iceConnectionState);
      console.debug(
        "ICE connection state changed to: ",
        this._pc!.iceConnectionState,
      );
      this._checkConnectionState();
    };

    this._pc.onicegatheringstatechange = () => {
      console.log("ICE gathering state changed to: ", this._pc!.iceGatheringState);
      console.debug(
        "ICE gathering state changed to: ",
        this._pc!.iceGatheringState,
      );
      this._checkConnectionState();
    };

    this._pc.onicecandidate = (e) => {
      if (e.candidate) {
        const message: MessageICE = {
          payload_type: "ice",
          candidate: e.candidate
        };
        this._ws!.send(JSON.stringify(message));
        const iceMsg = NewMessageICE("ice-candidate", e.candidate);
        if (this._p2pSafeStream) {
          this._p2pSafeStream.writeMessage(iceMsg).catch((err) =>
            console.error("Error sending ICE candidate:", err),
          );
        } else {
          console.warn("P2P stream not established, cannot send ICE candidate");
        }
      }
    };

@@ -207,26 +221,35 @@ export class WebRTCStream {
  private _checkConnectionState() {
    if (!this._pc) return;

    console.log("Checking connection state:", {
    console.debug("Checking connection state:", {
      connectionState: this._pc.connectionState,
      iceConnectionState: this._pc.iceConnectionState,
      hasAudioTrack: !!this._audioTrack,
      hasVideoTrack: !!this._videoTrack,
      isConnected: this._isConnected
      isConnected: this._isConnected,
    });

    if (this._pc.connectionState === "connected" && this._audioTrack !== undefined && this._videoTrack !== undefined) {
    if (
      this._pc.connectionState === "connected" &&
      this._audioTrack !== undefined &&
      this._videoTrack !== undefined
    ) {
      this._clearConnectionTimer();
      if (!this._isConnected) {
        // Only trigger callback if not already connected
        this._isConnected = true;
        if (this._onConnected !== undefined) {
          this._onConnected(new MediaStream([this._audioTrack, this._videoTrack]));
          this._onConnected(
            new MediaStream([this._audioTrack, this._videoTrack]),
          );

          // Continuously set low-latency target
          this._pc.getReceivers().forEach((receiver: RTCRtpReceiver) => {
            let intervalLoop = setInterval(async () => {
              if (receiver.track.readyState !== "live" || (receiver.transport && receiver.transport.state !== "connected")) {
              if (
                receiver.track.readyState !== "live" ||
                (receiver.transport && receiver.transport.state !== "connected")
              ) {
                clearInterval(intervalLoop);
                return;
              } else {
@@ -239,9 +262,11 @@ export class WebRTCStream {
      }

      this._gatherFrameRate();
    } else if (this._pc.connectionState === "failed" ||
    } else if (
      this._pc.connectionState === "failed" ||
      this._pc.connectionState === "closed" ||
      this._pc.iceConnectionState === "failed") {
      this._pc.iceConnectionState === "failed"
    ) {
      console.log("Connection failed or closed, attempting reconnect");
      this._isConnected = false; // Reset connected state
      this._handleConnectionFailure();
@@ -250,7 +275,8 @@ export class WebRTCStream {

  private _handleConnectionFailure() {
    this._clearConnectionTimer();
    if (this._isConnected) { // Only notify if previously connected
    if (this._isConnected) {
      // Only notify if previously connected
      this._isConnected = false;
      if (this._onConnected) {
        this._onConnected(null);
@@ -260,7 +286,7 @@ export class WebRTCStream {

    // Attempt to reconnect only if not already connected
    if (!this._isConnected && this._serverURL && this._roomName) {
      this._setup(this._serverURL, this._roomName);
      this._setup(this._serverURL, this._roomName).catch((err) => console.error("Reconnection failed:", err));
    }
  }

@@ -276,10 +302,8 @@ export class WebRTCStream {

    if (this._audioTrack || this._videoTrack) {
      try {
        if (this._audioTrack)
          this._audioTrack.stop();
        if (this._videoTrack)
          this._videoTrack.stop();
        if (this._audioTrack) this._audioTrack.stop();
        if (this._videoTrack) this._videoTrack.stop();
      } catch (err) {
        console.error("Error stopping media tracks:", err);
      }
@@ -308,16 +332,18 @@ export class WebRTCStream {
  private _setupDataChannelEvents() {
    if (!this._dataChannel) return;

    this._dataChannel.onclose = () => console.log('sendChannel has closed')
    this._dataChannel.onopen = () => console.log('sendChannel has opened')
    this._dataChannel.onmessage = e => console.log(`Message from DataChannel '${this._dataChannel?.label}' payload '${e.data}'`)
    this._dataChannel.onclose = () => console.log("sendChannel has closed");
    this._dataChannel.onopen = () => console.log("sendChannel has opened");
    this._dataChannel.onmessage = (e) =>
      console.log(
        `Message from DataChannel '${this._dataChannel?.label}' payload '${e.data}'`,
      );
  }

  private _gatherFrameRate() {
    if (this._pc === undefined || this._videoTrack === undefined)
      return;
    if (this._pc === undefined || this._videoTrack === undefined) return;

    const videoInfoPromise = new Promise<{ fps: number}>((resolve) => {
    const videoInfoPromise = new Promise<{ fps: number }>((resolve) => {
      // Keep trying to get fps until it's found
      const interval = setInterval(async () => {
        if (this._pc === undefined) {
@@ -329,7 +355,7 @@ export class WebRTCStream {
        stats.forEach((report) => {
          if (report.type === "inbound-rtp") {
            clearInterval(interval);


            resolve({ fps: report.framesPerSecond });
          }
        });
@@ -337,25 +363,26 @@ export class WebRTCStream {
    });

    videoInfoPromise.then((value) => {
      this.currentFrameRate = value.fps
    })
      this.currentFrameRate = value.fps;
    });
  }

  // Send binary message through the data channel
  public sendBinary(data: Uint8Array) {
    if (this._dataChannel && this._dataChannel.readyState === "open")
      this._dataChannel.send(data);
    else
      console.log("Data channel not open or not established.");
    else console.log("Data channel not open or not established.");
  }

  public disconnect() {
    this._clearConnectionTimer();
    this._cleanupPeerConnection();
    if (this._ws) {
      this._ws.close();
      this._ws = undefined;
    if (this._p2pConn) {
      this._p2pConn
        .close()
        .catch((err) => console.error("Error closing P2P connection:", err));
      this._p2pConn = undefined;
    }
    this._isConnected = false;
  }
  }
}

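The ICE handling above follows the usual buffer-until-remote-description pattern. Distilled into a hypothetical standalone helper (same logic as the registered "ice-candidate" callback, not code from the commit):

```ts
const pending: RTCIceCandidateInit[] = [];

function onRemoteCandidate(pc: RTCPeerConnection, c: RTCIceCandidateInit) {
  if (pc.remoteDescription) {
    // Remote SDP is applied: add immediately, then flush anything held back.
    pc.addIceCandidate(c).catch(console.error);
    for (const held of pending.splice(0)) {
      pc.addIceCandidate(held).catch(console.error);
    }
  } else {
    // No remote description yet: hold the candidate until the offer lands.
    pending.push(c);
  }
}
```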
@@ -14,9 +14,9 @@ type Resource struct {
	Auth struct {
		Url string `json:"url"`
	}
	AuthFingerprintKey struct {
	/*AuthFingerprintKey struct {
		Value string `json:"value"`
	}
	}*/
	Realtime struct {
		Endpoint string `json:"endpoint"`
		Authorizer string `json:"authorizer"`

packages/relay/.gitignore (vendored, new file)
@@ -0,0 +1 @@
persist-data/

@@ -3,16 +3,15 @@ module relay
go 1.24

require (
	github.com/gorilla/websocket v1.5.3
	github.com/libp2p/go-libp2p v0.41.1
	github.com/libp2p/go-libp2p-pubsub v0.13.1
	github.com/libp2p/go-reuseport v0.4.0
	github.com/multiformats/go-multiaddr v0.15.0
	github.com/oklog/ulid/v2 v2.1.0
	github.com/pion/ice/v4 v4.0.9
	github.com/pion/interceptor v0.1.37
	github.com/pion/rtp v1.8.13
	github.com/pion/webrtc/v4 v4.0.14
	github.com/oklog/ulid/v2 v2.1.1
	github.com/pion/ice/v4 v4.0.10
	github.com/pion/interceptor v0.1.38
	github.com/pion/rtp v1.8.15
	github.com/pion/webrtc/v4 v4.1.1
	google.golang.org/protobuf v1.36.6
)

@@ -26,32 +25,35 @@ require (
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/elastic/gosigar v0.14.3 // indirect
	github.com/filecoin-project/go-clock v0.1.0 // indirect
	github.com/flynn/noise v1.1.0 // indirect
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/gopacket v1.1.19 // indirect
	github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
	github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/websocket v1.5.3 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/huin/goupnp v1.3.0 // indirect
	github.com/ipfs/go-cid v0.5.0 // indirect
	github.com/ipfs/go-log/v2 v2.5.1 // indirect
	github.com/ipfs/go-log/v2 v2.6.0 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
	github.com/koron/go-ssdp v0.0.5 // indirect
	github.com/koron/go-ssdp v0.0.6 // indirect
	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
	github.com/libp2p/go-flow-metrics v0.2.0 // indirect
	github.com/libp2p/go-flow-metrics v0.3.0 // indirect
	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
	github.com/libp2p/go-msgio v0.3.0 // indirect
	github.com/libp2p/go-netroute v0.2.2 // indirect
	github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
	github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/miekg/dns v1.1.64 // indirect
	github.com/miekg/dns v1.1.66 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
@@ -66,7 +68,7 @@ require (
	github.com/multiformats/go-multistream v0.6.0 // indirect
	github.com/multiformats/go-varint v0.0.7 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/onsi/ginkgo/v2 v2.23.3 // indirect
	github.com/onsi/ginkgo/v2 v2.23.4 // indirect
	github.com/opencontainers/runtime-spec v1.2.1 // indirect
	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
	github.com/pion/datachannel v1.5.10 // indirect
@@ -76,37 +78,39 @@ require (
	github.com/pion/mdns/v2 v2.0.7 // indirect
	github.com/pion/randutil v0.1.0 // indirect
	github.com/pion/rtcp v1.2.15 // indirect
	github.com/pion/sctp v1.8.37 // indirect
	github.com/pion/sdp/v3 v3.0.11 // indirect
	github.com/pion/sctp v1.8.39 // indirect
	github.com/pion/sdp/v3 v3.0.13 // indirect
	github.com/pion/srtp/v3 v3.0.4 // indirect
	github.com/pion/stun v0.6.1 // indirect
	github.com/pion/stun/v3 v3.0.0 // indirect
	github.com/pion/transport/v2 v2.2.10 // indirect
	github.com/pion/transport/v3 v3.0.7 // indirect
	github.com/pion/turn/v4 v4.0.0 // indirect
	github.com/pion/turn/v4 v4.0.2 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/prometheus/client_golang v1.21.1 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.63.0 // indirect
	github.com/prometheus/procfs v0.16.0 // indirect
	github.com/prometheus/client_golang v1.22.0 // indirect
	github.com/prometheus/client_model v0.6.2 // indirect
	github.com/prometheus/common v0.64.0 // indirect
	github.com/prometheus/procfs v0.16.1 // indirect
	github.com/quic-go/qpack v0.5.1 // indirect
	github.com/quic-go/quic-go v0.50.1 // indirect
	github.com/quic-go/quic-go v0.52.0 // indirect
	github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
	github.com/raulk/go-watchdog v1.3.0 // indirect
	github.com/rogpeppe/go-internal v1.13.1 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/wlynxg/anet v0.0.5 // indirect
	go.uber.org/dig v1.18.1 // indirect
	go.uber.org/fx v1.23.0 // indirect
	go.uber.org/mock v0.5.0 // indirect
	go.uber.org/automaxprocs v1.6.0 // indirect
	go.uber.org/dig v1.19.0 // indirect
	go.uber.org/fx v1.24.0 // indirect
	go.uber.org/mock v0.5.2 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/crypto v0.36.0 // indirect
	golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
	golang.org/x/crypto v0.38.0 // indirect
	golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/net v0.38.0 // indirect
	golang.org/x/sync v0.13.0 // indirect
	golang.org/x/sys v0.32.0 // indirect
	golang.org/x/text v0.24.0 // indirect
	golang.org/x/tools v0.31.0 // indirect
	lukechampine.com/blake3 v1.4.0 // indirect
	golang.org/x/net v0.40.0 // indirect
	golang.org/x/sync v0.14.0 // indirect
	golang.org/x/sys v0.33.0 // indirect
	golang.org/x/text v0.25.0 // indirect
	golang.org/x/tools v0.33.0 // indirect
	lukechampine.com/blake3 v1.4.1 // indirect
)

@@ -9,7 +9,6 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -47,6 +46,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
@@ -85,8 +86,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
@@ -102,8 +103,8 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
@@ -118,8 +119,8 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -131,8 +132,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p v0.41.1 h1:8ecNQVT5ev/jqALTvisSJeVNvXYJyK4NhQx1nNRXQZE=
github.com/libp2p/go-libp2p v0.41.1/go.mod h1:DcGTovJzQl/I7HMrby5ZRjeD0kQkGiy+9w6aEkSZpRI=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
@@ -149,17 +150,19 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ=
github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -201,12 +204,12 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -221,10 +224,10 @@ github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
github.com/pion/ice/v4 v4.0.9 h1:VKgU4MwA2LUDVLq+WBkpEHTcAb8c5iCvFMECeuPOZNk=
github.com/pion/ice/v4 v4.0.9/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.38 h1:Mgt3XIIq47uR5vcLLahfRucE6tFPjxHak+z5ZZFEzLU=
github.com/pion/interceptor v0.1.38/go.mod h1:HS9X+Ue5LDE6q2C2tuvOuO83XkBdJFgn6MBDtfoJX4Q=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
@@ -234,12 +237,12 @@ github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.13 h1:8uSUPpjSL4OlwZI8Ygqu7+h2p9NPFB+yAZ461Xn5sNg=
github.com/pion/rtp v1.8.13/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
github.com/pion/sctp v1.8.37 h1:ZDmGPtRPX9mKCiVXtMbTWybFw3z/hVKAZgU81wcOrqs=
github.com/pion/sctp v1.8.37/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.11 h1:VhgVSopdsBKwhCFoyyPmT1fKMeV9nLMrEKxNOdy3IVI=
github.com/pion/sdp/v3 v3.0.11/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/rtp v1.8.15 h1:MuhuGn1cxpVCPLNY1lI7F1tQ8Spntpgf12ob+pOYT8s=
github.com/pion/rtp v1.8.15/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
@@ -252,37 +255,39 @@ github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQp
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/webrtc/v4 v4.0.14 h1:nyds/sFRR+HvmWoBa6wrL46sSfpArE0qR883MBW96lg=
github.com/pion/webrtc/v4 v4.0.14/go.mod h1:R3+qTnQTS03UzwDarYecgioNf7DYgTsldxnCXB821Kk=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.1 h1:PMFPtLg1kpD2pVtun+LGUzA3k54JdFl87WO0Z1+HKug=
github.com/pion/webrtc/v4 v4.1.1/go.mod h1:cgEGkcpxGkT6Di2ClBYO5lP9mFXbCfEOrkYUpjjCQO4=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
github.com/quic-go/quic-go v0.52.0 h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA=
github.com/quic-go/quic-go v0.52.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -318,9 +323,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
@@ -336,23 +339,20 @@ github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/dig v1.18.1 h1:rLww6NuajVjeQn+49u5NcezUJEGwd5uXmyoCKW2g5Es=
go.uber.org/dig v1.18.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
|
||||
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
|
||||
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
@@ -369,20 +369,18 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
|
||||
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
@@ -394,7 +392,6 @@ golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -402,15 +399,15 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -426,8 +423,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
|
||||
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -440,10 +437,10 @@ golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -452,8 +449,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
@@ -464,33 +461,30 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
|
||||
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
|
||||
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
|
||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -520,16 +514,14 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
|
||||
lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
|
||||
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
||||
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
|
||||
@@ -2,12 +2,13 @@ package common

import (
    "fmt"
    "log/slog"
    "strconv"

    "github.com/libp2p/go-reuseport"
    "github.com/pion/ice/v4"
    "github.com/pion/interceptor"
    "github.com/pion/webrtc/v4"
    "log/slog"
    "strconv"
)

var globalWebRTCAPI *webrtc.API
@@ -24,17 +25,9 @@ func InitWebRTCAPI() error {
    // Media engine
    mediaEngine := &webrtc.MediaEngine{}

    // Register additional header extensions to reduce latency
    // Playout Delay
    if err := mediaEngine.RegisterHeaderExtension(webrtc.RTPHeaderExtensionCapability{
        URI: ExtensionPlayoutDelay,
    }, webrtc.RTPCodecTypeVideo); err != nil {
        return err
    }
    if err := mediaEngine.RegisterHeaderExtension(webrtc.RTPHeaderExtensionCapability{
        URI: ExtensionPlayoutDelay,
    }, webrtc.RTPCodecTypeAudio); err != nil {
        return err
    // Register our extensions
    if err := RegisterExtensions(mediaEngine); err != nil {
        return fmt.Errorf("failed to register extensions: %w", err)
    }

    // Default codecs cover most of our needs
@@ -75,9 +68,10 @@ func InitWebRTCAPI() error {
    // New in v4, reduces CPU usage and latency when enabled
    settingEngine.EnableSCTPZeroChecksum(true)

    nat11IPs := GetFlags().NAT11IPs
    if len(nat11IPs) > 0 {
        settingEngine.SetNAT1To1IPs(nat11IPs, webrtc.ICECandidateTypeHost)
    nat11IP := GetFlags().NAT11IP
    if len(nat11IP) > 0 {
        settingEngine.SetNAT1To1IPs([]string{nat11IP}, webrtc.ICECandidateTypeSrflx)
        slog.Info("Using NAT 1:1 IP for WebRTC", "nat11_ip", nat11IP)
    }

    muxPort := GetFlags().UDPMuxPort
@@ -85,7 +79,7 @@ func InitWebRTCAPI() error {
    // Use reuseport to allow multiple listeners on the same port
    pktListener, err := reuseport.ListenPacket("udp", ":"+strconv.Itoa(muxPort))
    if err != nil {
        return fmt.Errorf("failed to create UDP listener: %w", err)
        return fmt.Errorf("failed to create WebRTC muxed UDP listener: %w", err)
    }

    mux := ice.NewMultiUDPMuxDefault(ice.NewUDPMuxDefault(ice.UDPMuxParams{
@@ -95,10 +89,13 @@ func InitWebRTCAPI() error {
        settingEngine.SetICEUDPMux(mux)
    }

    // Set the UDP port range used by WebRTC
    err = settingEngine.SetEphemeralUDPPortRange(uint16(flags.WebRTCUDPStart), uint16(flags.WebRTCUDPEnd))
    if err != nil {
        return err
    if flags.WebRTCUDPStart > 0 && flags.WebRTCUDPEnd > 0 && flags.WebRTCUDPStart < flags.WebRTCUDPEnd {
        // Set the UDP port range used by WebRTC
        err = settingEngine.SetEphemeralUDPPortRange(uint16(flags.WebRTCUDPStart), uint16(flags.WebRTCUDPEnd))
        if err != nil {
            return err
        }
        slog.Info("Using WebRTC UDP Port Range", "start", flags.WebRTCUDPStart, "end", flags.WebRTCUDPEnd)
    }

    settingEngine.SetIncludeLoopbackCandidate(true) // Just in case
@@ -109,11 +106,6 @@ func InitWebRTCAPI() error {
    return nil
}

// GetWebRTCAPI returns the global WebRTC API
func GetWebRTCAPI() *webrtc.API {
    return globalWebRTCAPI
}

// CreatePeerConnection sets up a new peer connection
func CreatePeerConnection(onClose func()) (*webrtc.PeerConnection, error) {
    pc, err := globalWebRTCAPI.NewPeerConnection(globalWebRTCConfig)

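For orientation, the pieces above compose like this at startup. A minimal sketch — the `main` wiring and log messages are illustrative and not part of this diff; only `InitFlags`, `InitWebRTCAPI`, and `CreatePeerConnection` come from the code shown here:

```go
package main

import (
	"log/slog"

	"relay/internal/common"
)

func main() {
	common.InitFlags() // populates GetFlags(), which InitWebRTCAPI reads
	if err := common.InitWebRTCAPI(); err != nil {
		slog.Error("failed to init WebRTC API", "err", err)
		return
	}
	pc, err := common.CreatePeerConnection(func() {
		slog.Info("peer connection closed")
	})
	if err != nil {
		slog.Error("failed to create peer connection", "err", err)
		return
	}
	defer pc.Close()
}
```

Note the new guard: the ephemeral UDP port range is only applied when both ends are set and ordered, so a mux-only deployment can leave `WEBRTC_UDP_START`/`WEBRTC_UDP_END` at their new defaults of 0.
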
@@ -1,19 +1,51 @@
package common

import (
    "crypto/ed25519"
    "crypto/rand"
    "crypto/sha256"
    "github.com/oklog/ulid/v2"
    "errors"
    "fmt"
    "os"
    "time"

    "github.com/oklog/ulid/v2"
)

func NewULID() (ulid.ULID, error) {
    return ulid.New(ulid.Timestamp(time.Now()), ulid.Monotonic(rand.Reader, 0))
}

// Helper function to generate PSK from token
func GeneratePSKFromToken(token string) ([]byte, error) {
    // Simple hash-based PSK generation (32 bytes for libp2p)
    hash := sha256.Sum256([]byte(token))
    return hash[:], nil
// GenerateED25519Key generates a new ED25519 key
func GenerateED25519Key() (ed25519.PrivateKey, error) {
    _, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        return nil, fmt.Errorf("failed to generate ED25519 key pair: %w", err)
    }
    return priv, nil
}

// SaveED25519Key saves an ED25519 private key to a path as a binary file
func SaveED25519Key(privateKey ed25519.PrivateKey, filePath string) error {
    if privateKey == nil {
        return errors.New("private key cannot be nil")
    }
    if len(privateKey) != ed25519.PrivateKeySize {
        return errors.New("private key must be exactly 64 bytes for ED25519")
    }
    if err := os.WriteFile(filePath, privateKey, 0600); err != nil {
        return fmt.Errorf("failed to save ED25519 key to %s: %w", filePath, err)
    }
    return nil
}

// LoadED25519Key loads an ED25519 private key binary file from a path
func LoadED25519Key(filePath string) (ed25519.PrivateKey, error) {
    data, err := os.ReadFile(filePath)
    if err != nil {
        return nil, fmt.Errorf("failed to read ED25519 key from %s: %w", filePath, err)
    }
    if len(data) != ed25519.PrivateKeySize {
        return nil, fmt.Errorf("ED25519 key must be exactly %d bytes, got %d", ed25519.PrivateKeySize, len(data))
    }
    return data, nil
}

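These helpers pair naturally with libp2p's key types. A plausible load-or-generate sequence — the `loadIdentity` wrapper and its fallback logic are hypothetical, but `crypto.UnmarshalEd25519PrivateKey` is the real go-libp2p conversion for raw 64-byte ED25519 keys:

```go
package main

import (
	libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"

	"relay/internal/common"
)

// loadIdentity returns a libp2p private key backed by the ED25519 key file
// at path, generating and persisting a fresh key on first run.
func loadIdentity(path string) (libp2pcrypto.PrivKey, error) {
	key, err := common.LoadED25519Key(path)
	if err != nil {
		var genErr error
		if key, genErr = common.GenerateED25519Key(); genErr != nil {
			return nil, genErr
		}
		if saveErr := common.SaveED25519Key(key, path); saveErr != nil {
			return nil, saveErr
		}
	}
	return libp2pcrypto.UnmarshalEd25519PrivateKey(key)
}
```

Persisting the raw key under `PersistDir` is what gives the relay a stable peer ID across restarts.
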
@@ -1,11 +1,45 @@
package common

import "github.com/pion/webrtc/v4"

const (
    ExtensionPlayoutDelay string = "http://www.webrtc.org/experiments/rtp-hdrext/playout-delay"
)

// ExtensionMap maps URIs to their IDs based on registration order
// IMPORTANT: This must match the order in which extensions are registered in common.go!
var ExtensionMap = map[string]uint8{
    ExtensionPlayoutDelay: 1,
// ExtensionMap maps audio/video extension URIs to their IDs based on registration order
var ExtensionMap = map[webrtc.RTPCodecType]map[string]uint8{}

func RegisterExtensions(mediaEngine *webrtc.MediaEngine) error {
    // Register additional header extensions to reduce latency
    // Playout Delay (Video)
    if err := mediaEngine.RegisterHeaderExtension(webrtc.RTPHeaderExtensionCapability{
        URI: ExtensionPlayoutDelay,
    }, webrtc.RTPCodecTypeVideo); err != nil {
        return err
    }
    // Playout Delay (Audio)
    if err := mediaEngine.RegisterHeaderExtension(webrtc.RTPHeaderExtensionCapability{
        URI: ExtensionPlayoutDelay,
    }, webrtc.RTPCodecTypeAudio); err != nil {
        return err
    }

    // Register the extension IDs for both audio and video
    ExtensionMap[webrtc.RTPCodecTypeAudio] = map[string]uint8{
        ExtensionPlayoutDelay: 1,
    }
    ExtensionMap[webrtc.RTPCodecTypeVideo] = map[string]uint8{
        ExtensionPlayoutDelay: 1,
    }

    return nil
}

func GetExtension(codecType webrtc.RTPCodecType, extURI string) (uint8, bool) {
    cType, ok := ExtensionMap[codecType]
    if !ok {
        return 0, false
    }
    extID, ok := cType[extURI]
    return extID, ok
}

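Keying the map by `webrtc.RTPCodecType` lets audio and video register different extension sets later without ID clashes. A quick standalone usage check of the new lookup (the printed output is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/pion/webrtc/v4"

	"relay/internal/common"
)

func main() {
	mediaEngine := &webrtc.MediaEngine{}
	if err := common.RegisterExtensions(mediaEngine); err != nil {
		log.Fatal(err)
	}
	// IDs follow registration order, so playout-delay resolves to 1 here.
	if id, ok := common.GetExtension(webrtc.RTPCodecTypeVideo, common.ExtensionPlayoutDelay); ok {
		fmt.Println("playout-delay video extension ID:", id)
	}
}
```
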
@@ -2,47 +2,43 @@ package common

import (
    "flag"
    "github.com/pion/webrtc/v4"
    "log/slog"
    "net"
    "os"
    "strconv"
    "strings"

    "github.com/pion/webrtc/v4"
)

var globalFlags *Flags

type Flags struct {
    Verbose bool // Log everything to console
    Debug bool // Enable debug mode, implies Verbose
    EndpointPort int // Port for HTTP/S and WS/S endpoint (TCP)
    MeshPort int // Port for Mesh connections (TCP)
    WebRTCUDPStart int // WebRTC UDP port range start - ignored if UDPMuxPort is set
    WebRTCUDPEnd int // WebRTC UDP port range end - ignored if UDPMuxPort is set
    STUNServer string // WebRTC STUN server
    UDPMuxPort int // WebRTC UDP mux port - if set, overrides UDP port range
    AutoAddLocalIP bool // Automatically add local IP to NAT 1 to 1 IPs
    NAT11IPs []string // WebRTC NAT 1 to 1 IP(s) - allows specifying host IP(s) if behind NAT
    TLSCert string // Path to TLS certificate
    TLSKey string // Path to TLS key
    ControlSecret string // Shared secret for this relay's control endpoint
    RegenIdentity bool // Remove old identity on startup and regenerate it
    Verbose bool // Log everything to console
    Debug bool // Enable debug mode, implies Verbose
    EndpointPort int // Port for HTTP/S and WS/S endpoint (TCP)
    WebRTCUDPStart int // WebRTC UDP port range start - ignored if UDPMuxPort is set
    WebRTCUDPEnd int // WebRTC UDP port range end - ignored if UDPMuxPort is set
    STUNServer string // WebRTC STUN server
    UDPMuxPort int // WebRTC UDP mux port - if set, overrides UDP port range
    AutoAddLocalIP bool // Automatically add local IP to NAT 1 to 1 IPs
    NAT11IP string // WebRTC NAT 1 to 1 IP - allows specifying IP of relay if behind NAT
    PersistDir string // Directory to save persistent data to
}

func (flags *Flags) DebugLog() {
    slog.Info("Relay flags",
    slog.Debug("Relay flags",
        "regenIdentity", flags.RegenIdentity,
        "verbose", flags.Verbose,
        "debug", flags.Debug,
        "endpointPort", flags.EndpointPort,
        "meshPort", flags.MeshPort,
        "webrtcUDPStart", flags.WebRTCUDPStart,
        "webrtcUDPEnd", flags.WebRTCUDPEnd,
        "stunServer", flags.STUNServer,
        "webrtcUDPMux", flags.UDPMuxPort,
        "autoAddLocalIP", flags.AutoAddLocalIP,
        "webrtcNAT11IPs", strings.Join(flags.NAT11IPs, ","),
        "tlsCert", flags.TLSCert,
        "tlsKey", flags.TLSKey,
        "controlSecret", flags.ControlSecret,
        "webrtcNAT11IPs", flags.NAT11IP,
        "persistDir", flags.PersistDir,
    )
}

@@ -76,29 +72,25 @@ func InitFlags() {
    // Create Flags struct
    globalFlags = &Flags{}
    // Get flags
    flag.BoolVar(&globalFlags.RegenIdentity, "regenIdentity", getEnvAsBool("REGEN_IDENTITY", false), "Regenerate identity on startup")
    flag.BoolVar(&globalFlags.Verbose, "verbose", getEnvAsBool("VERBOSE", false), "Verbose mode")
    flag.BoolVar(&globalFlags.Debug, "debug", getEnvAsBool("DEBUG", false), "Debug mode")
    flag.IntVar(&globalFlags.EndpointPort, "endpointPort", getEnvAsInt("ENDPOINT_PORT", 8088), "HTTP endpoint port")
    flag.IntVar(&globalFlags.MeshPort, "meshPort", getEnvAsInt("MESH_PORT", 8089), "Mesh connections TCP port")
    flag.IntVar(&globalFlags.WebRTCUDPStart, "webrtcUDPStart", getEnvAsInt("WEBRTC_UDP_START", 10000), "WebRTC UDP port range start")
    flag.IntVar(&globalFlags.WebRTCUDPEnd, "webrtcUDPEnd", getEnvAsInt("WEBRTC_UDP_END", 20000), "WebRTC UDP port range end")
    flag.IntVar(&globalFlags.WebRTCUDPStart, "webrtcUDPStart", getEnvAsInt("WEBRTC_UDP_START", 0), "WebRTC UDP port range start")
    flag.IntVar(&globalFlags.WebRTCUDPEnd, "webrtcUDPEnd", getEnvAsInt("WEBRTC_UDP_END", 0), "WebRTC UDP port range end")
    flag.StringVar(&globalFlags.STUNServer, "stunServer", getEnvAsString("STUN_SERVER", "stun.l.google.com:19302"), "WebRTC STUN server")
    flag.IntVar(&globalFlags.UDPMuxPort, "webrtcUDPMux", getEnvAsInt("WEBRTC_UDP_MUX", 8088), "WebRTC UDP mux port")
    flag.BoolVar(&globalFlags.AutoAddLocalIP, "autoAddLocalIP", getEnvAsBool("AUTO_ADD_LOCAL_IP", true), "Automatically add local IP to NAT 1 to 1 IPs")
    // String with comma separated IPs
    nat11IPs := ""
    flag.StringVar(&nat11IPs, "webrtcNAT11IPs", getEnvAsString("WEBRTC_NAT_IPS", ""), "WebRTC NAT 1 to 1 IP(s), comma delimited")
    flag.StringVar(&globalFlags.TLSCert, "tlsCert", getEnvAsString("TLS_CERT", ""), "Path to TLS certificate")
    flag.StringVar(&globalFlags.TLSKey, "tlsKey", getEnvAsString("TLS_KEY", ""), "Path to TLS key")
    flag.StringVar(&globalFlags.ControlSecret, "controlSecret", getEnvAsString("CONTROL_SECRET", ""), "Shared secret for control endpoint")
    nat11IP := ""
    flag.StringVar(&nat11IP, "webrtcNAT11IP", getEnvAsString("WEBRTC_NAT_IP", ""), "WebRTC NAT 1 to 1 IP")
    flag.StringVar(&globalFlags.PersistDir, "persistDir", getEnvAsString("PERSIST_DIR", "./persist-data"), "Directory to save persistent data to")
    // Parse flags
    flag.Parse()

    // If debug is enabled, verbose is also enabled
    if globalFlags.Debug {
        globalFlags.Verbose = true
        // If Debug is enabled, set ControlSecret to 1234
        globalFlags.ControlSecret = "1234"
    }

    // ICE STUN servers
@@ -108,24 +100,11 @@ func InitFlags() {
        },
    }

    // Initialize NAT 1 to 1 IPs
    globalFlags.NAT11IPs = []string{}

    // Get local IP
    if globalFlags.AutoAddLocalIP {
        globalFlags.NAT11IPs = append(globalFlags.NAT11IPs, getLocalIP())
    }

    // Parse NAT 1 to 1 IPs from string
    if len(nat11IPs) > 0 {
        split := strings.Split(nat11IPs, ",")
        if len(split) > 0 {
            for _, ip := range split {
                globalFlags.NAT11IPs = append(globalFlags.NAT11IPs, ip)
            }
        } else {
            globalFlags.NAT11IPs = append(globalFlags.NAT11IPs, nat11IPs)
        }
    if len(nat11IP) > 0 {
        globalFlags.NAT11IP = nat11IP
    } else if globalFlags.AutoAddLocalIP {
        globalFlags.NAT11IP = getLocalIP()
    }
}


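The `getEnvAsBool`/`getEnvAsInt`/`getEnvAsString` helpers used above are defined elsewhere in this file and are not visible in the hunk. For readers following along, they are presumably thin wrappers along these lines (an assumption, not the repository's actual code):

```go
func getEnvAsString(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}

func getEnvAsInt(key string, fallback int) int {
	if v, ok := os.LookupEnv(key); ok {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return fallback
}

func getEnvAsBool(key string, fallback bool) bool {
	if v, ok := os.LookupEnv(key); ok {
		if b, err := strconv.ParseBool(v); err == nil {
			return b
		}
	}
	return fallback
}
```
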
@@ -2,9 +2,10 @@ package common

import (
    "fmt"
    "google.golang.org/protobuf/types/known/timestamppb"
    gen "relay/internal/proto"
    "time"

    "google.golang.org/protobuf/types/known/timestamppb"
)

type TimestampEntry struct {

packages/relay/internal/common/safebufio.go (new file, 175 lines)
@@ -0,0 +1,175 @@
package common

import (
    "bufio"
    "encoding/binary"
    "encoding/json"
    "errors"
    "io"
    "sync"

    "google.golang.org/protobuf/proto"
)

// MaxSize is the maximum allowed data size (1MB)
const MaxSize = 1024 * 1024

// SafeBufioRW wraps a bufio.ReadWriter for sending and receiving JSON and protobufs safely
type SafeBufioRW struct {
    brw *bufio.ReadWriter
    mutex sync.RWMutex
}

func NewSafeBufioRW(brw *bufio.ReadWriter) *SafeBufioRW {
    return &SafeBufioRW{brw: brw}
}

// SendJSON serializes the given data as JSON and sends it with a 4-byte length prefix
func (bu *SafeBufioRW) SendJSON(data interface{}) error {
    bu.mutex.Lock()
    defer bu.mutex.Unlock()

    jsonData, err := json.Marshal(data)
    if err != nil {
        return err
    }

    if len(jsonData) > MaxSize {
        return errors.New("JSON data exceeds maximum size")
    }

    // Write the 4-byte length prefix
    if err = binary.Write(bu.brw, binary.BigEndian, uint32(len(jsonData))); err != nil {
        return err
    }

    // Write the JSON data
    if _, err = bu.brw.Write(jsonData); err != nil {
        return err
    }

    // Flush the writer to ensure data is sent
    return bu.brw.Flush()
}

// ReceiveJSON reads a 4-byte length prefix, then reads and unmarshals the JSON
func (bu *SafeBufioRW) ReceiveJSON(dest interface{}) error {
    bu.mutex.RLock()
    defer bu.mutex.RUnlock()

    // Read the 4-byte length prefix
    var length uint32
    if err := binary.Read(bu.brw, binary.BigEndian, &length); err != nil {
        return err
    }

    if length > MaxSize {
        return errors.New("received JSON data exceeds maximum size")
    }

    // Read the JSON data
    data := make([]byte, length)
    if _, err := io.ReadFull(bu.brw, data); err != nil {
        return err
    }

    return json.Unmarshal(data, dest)
}

// Receive reads a 4-byte length prefix, then reads the raw data
func (bu *SafeBufioRW) Receive() ([]byte, error) {
    bu.mutex.RLock()
    defer bu.mutex.RUnlock()

    // Read the 4-byte length prefix
    var length uint32
    if err := binary.Read(bu.brw, binary.BigEndian, &length); err != nil {
        return nil, err
    }

    if length > MaxSize {
        return nil, errors.New("received data exceeds maximum size")
    }

    // Read the raw data
    data := make([]byte, length)
    if _, err := io.ReadFull(bu.brw, data); err != nil {
        return nil, err
    }

    return data, nil
}

// SendProto serializes the given protobuf message and sends it with a 4-byte length prefix
func (bu *SafeBufioRW) SendProto(msg proto.Message) error {
    bu.mutex.Lock()
    defer bu.mutex.Unlock()

    protoData, err := proto.Marshal(msg)
    if err != nil {
        return err
    }

    if len(protoData) > MaxSize {
        return errors.New("protobuf data exceeds maximum size")
    }

    // Write the 4-byte length prefix
    if err = binary.Write(bu.brw, binary.BigEndian, uint32(len(protoData))); err != nil {
        return err
    }

    // Write the Protobuf data
    if _, err := bu.brw.Write(protoData); err != nil {
        return err
    }

    // Flush the writer to ensure data is sent
    return bu.brw.Flush()
}

// ReceiveProto reads a 4-byte length prefix, then reads and unmarshals the protobuf
func (bu *SafeBufioRW) ReceiveProto(msg proto.Message) error {
    bu.mutex.RLock()
    defer bu.mutex.RUnlock()

    // Read the 4-byte length prefix
    var length uint32
    if err := binary.Read(bu.brw, binary.BigEndian, &length); err != nil {
        return err
    }

    if length > MaxSize {
        return errors.New("received Protobuf data exceeds maximum size")
    }

    // Read the Protobuf data
    data := make([]byte, length)
    if _, err := io.ReadFull(bu.brw, data); err != nil {
        return err
    }

    return proto.Unmarshal(data, msg)
}

// Write writes raw data to the underlying buffer
func (bu *SafeBufioRW) Write(data []byte) (int, error) {
    bu.mutex.Lock()
    defer bu.mutex.Unlock()

    if len(data) > MaxSize {
        return 0, errors.New("data exceeds maximum size")
    }

    n, err := bu.brw.Write(data)
    if err != nil {
        return n, err
    }

    // Flush the writer to ensure data is sent
    if err = bu.brw.Flush(); err != nil {
        return n, err
    }

    return n, nil
}
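Since a libp2p stream is an `io.ReadWriteCloser`, `SafeBufioRW` drops straight onto one. A minimal handler sketch — the protocol ID, function name, and message shape here are assumptions for illustration; the framing itself (a big-endian `uint32` length prefix, capped at `MaxSize` on both send and receive) is exactly what `safebufio.go` implements:

```go
import (
	"bufio"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"

	"relay/internal/common"
)

func registerSignaling(h host.Host) {
	h.SetStreamHandler("/nestri/signaling/1.0.0", func(s network.Stream) {
		brw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
		safe := common.NewSafeBufioRW(brw)

		var hello map[string]any
		if err := safe.ReceiveJSON(&hello); err != nil {
			_ = s.Reset() // bad frame or oversized payload
			return
		}
		_ = safe.SendJSON(map[string]string{"payload_type": "ack"})
	})
}
```

The 1 MiB cap bounds memory per frame, which matters once untrusted peers can open streams.
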
@@ -1,18 +1,11 @@
package common

import (
    "errors"
    "reflect"
    "encoding/json"
    "fmt"
    "sync"
)

var (
    ErrKeyNotFound = errors.New("key not found")
    ErrValueNotPointer = errors.New("value is not a pointer")
    ErrFieldNotFound = errors.New("field not found")
    ErrTypeMismatch = errors.New("type mismatch")
)

// SafeMap is a generic thread-safe map with its own mutex
type SafeMap[K comparable, V any] struct {
    mu sync.RWMutex
@@ -34,6 +27,14 @@ func (sm *SafeMap[K, V]) Get(key K) (V, bool) {
    return v, ok
}

// Has checks if a key exists in the map
func (sm *SafeMap[K, V]) Has(key K) bool {
    sm.mu.RLock()
    defer sm.mu.RUnlock()
    _, ok := sm.m[key]
    return ok
}

// Set adds or updates a value in the map
func (sm *SafeMap[K, V]) Set(key K, value V) {
    sm.mu.Lock()
@@ -66,36 +67,31 @@ func (sm *SafeMap[K, V]) Copy() map[K]V {
    return copied
}

// Update updates a specific field in the value data
func (sm *SafeMap[K, V]) Update(key K, fieldName string, newValue any) error {
// Range iterates over the map and applies a function to each key-value pair
func (sm *SafeMap[K, V]) Range(f func(K, V) bool) {
    sm.mu.RLock()
    defer sm.mu.RUnlock()
    for k, v := range sm.m {
        if !f(k, v) {
            break
        }
    }
}

func (sm *SafeMap[K, V]) MarshalJSON() ([]byte, error) {
    sm.mu.RLock()
    defer sm.mu.RUnlock()
    return json.Marshal(sm.m)
}

func (sm *SafeMap[K, V]) UnmarshalJSON(data []byte) error {
    sm.mu.Lock()
    defer sm.mu.Unlock()

    v, ok := sm.m[key]
    if !ok {
        return ErrKeyNotFound
    }

    // Use reflect to update the field
    rv := reflect.ValueOf(v)
    if rv.Kind() != reflect.Ptr {
        return ErrValueNotPointer
    }

    rv = rv.Elem()
    // Check if the field exists
    field := rv.FieldByName(fieldName)
    if !field.IsValid() || !field.CanSet() {
        return ErrFieldNotFound
    }

    newRV := reflect.ValueOf(newValue)
    if newRV.Type() != field.Type() {
        return ErrTypeMismatch
    }

    field.Set(newRV)
    sm.m[key] = v

    return nil
    return json.Unmarshal(data, &sm.m)
}

func (sm *SafeMap[K, V]) String() string {
    sm.mu.RLock()
    defer sm.mu.RUnlock()
    return fmt.Sprintf("%+v", sm.m)
}
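The reflection-based `Update` gives way to `Range` plus JSON round-tripping, which is what pubsub-driven room-state synchronization needs. Typical use — the constructor name `NewSafeMap` is assumed from the unchanged portion of the file:

```go
rooms := common.NewSafeMap[string, int]()
rooms.Set("room-a", 2)
rooms.Set("room-b", 5)

// Range holds the read lock for the whole iteration; return false to stop early.
rooms.Range(func(name string, participants int) bool {
	fmt.Println(name, participants)
	return true
})

// MarshalJSON/UnmarshalJSON let SafeMap fields serialize directly inside
// state messages without copying out first.
if data, err := json.Marshal(rooms); err == nil {
	fmt.Println(string(data)) // e.g. {"room-a":2,"room-b":5}
}
```
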
@@ -1,12 +1,15 @@
package connections

import (
    "github.com/pion/webrtc/v4"
    "google.golang.org/protobuf/proto"
    "log/slog"
    gen "relay/internal/proto"

    "github.com/pion/webrtc/v4"
    "google.golang.org/protobuf/proto"
)

type OnMessageCallback func(data []byte)

// NestriDataChannel is a custom data channel with callbacks
type NestriDataChannel struct {
    *webrtc.DataChannel
@@ -37,7 +40,7 @@ func NewNestriDataChannel(dc *webrtc.DataChannel) *NestriDataChannel {
        // Handle message type callback
        if callback, ok := ndc.callbacks["input"]; ok {
            go callback(msg.Data)
        } // TODO: Log unknown message type?
        } // We don't care about unhandled messages
    })

    return ndc

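The dispatch above fires whatever callback is registered under the `"input"` key. The registration side is outside this hunk; presumably it mirrors the old SafeWebSocket API, something like the following (the method name `RegisterMessageCallback` is inferred from the `callbacks` map and is not visible in this diff):

```go
ndc := connections.NewNestriDataChannel(dc)
// Hypothetical registration; exact API not shown in this hunk.
ndc.RegisterMessageCallback("input", func(data []byte) {
	// forward the raw input payload to the pipeline
})
```
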
@@ -1,18 +1,32 @@
package connections

import (
    "github.com/pion/webrtc/v4"
    "encoding/json"
    "relay/internal/common"
    "time"

    "github.com/pion/webrtc/v4"
)

// MessageBase is the base type for WS/DC messages.
// MessageBase is the base type for any JSON message
type MessageBase struct {
    PayloadType string `json:"payload_type"`
    Latency *common.LatencyTracker `json:"latency,omitempty"`
    Type string `json:"payload_type"`
    Latency *common.LatencyTracker `json:"latency,omitempty"`
}

type MessageRaw struct {
    MessageBase
    Data json.RawMessage `json:"data"`
}

func NewMessageRaw(t string, data json.RawMessage) *MessageRaw {
    return &MessageRaw{
        MessageBase: MessageBase{
            Type: t,
        },
        Data: data,
    }
}

// MessageLog represents a log message.
type MessageLog struct {
    MessageBase
    Level string `json:"level"`
@@ -20,7 +34,17 @@ type MessageLog struct {
    Time string `json:"time"`
}

// MessageMetrics represents a metrics/heartbeat message.
func NewMessageLog(t string, level, message, time string) *MessageLog {
    return &MessageLog{
        MessageBase: MessageBase{
            Type: t,
        },
        Level: level,
        Message: message,
        Time: time,
    }
}

type MessageMetrics struct {
    MessageBase
    UsageCPU float64 `json:"usage_cpu"`
@@ -29,104 +53,42 @@ type MessageMetrics struct {
    PipelineLatency float64 `json:"pipeline_latency"`
}

// MessageICECandidate represents an ICE candidate message.
type MessageICECandidate struct {
    MessageBase
    Candidate webrtc.ICECandidateInit `json:"candidate"`
}

// MessageSDP represents an SDP message.
type MessageSDP struct {
    MessageBase
    SDP webrtc.SessionDescription `json:"sdp"`
}

// JoinerType is an enum for the type of incoming room joiner
type JoinerType int

const (
    JoinerNode JoinerType = iota
    JoinerClient
)

func (jt *JoinerType) String() string {
    switch *jt {
    case JoinerNode:
        return "node"
    case JoinerClient:
        return "client"
    default:
        return "unknown"
    }
}

// MessageJoin is used to tell us that either participant or ingest wants to join the room
type MessageJoin struct {
    MessageBase
    JoinerType JoinerType `json:"joiner_type"`
}

// AnswerType is an enum for the type of answer, signaling Room state for a joiner
type AnswerType int

const (
    AnswerOffline AnswerType = iota // For participant/client, when the room is offline without stream
    AnswerInUse // For ingest/node joiner, when the room is already in use by another ingest/node
    AnswerOK // For both, when the join request is handled successfully
)

// MessageAnswer is used to send the answer to a join request
type MessageAnswer struct {
    MessageBase
    AnswerType AnswerType `json:"answer_type"`
}

// SendLogMessageWS sends a log message to the given WebSocket connection.
func (ws *SafeWebSocket) SendLogMessageWS(level, message string) error {
    msg := MessageLog{
        MessageBase: MessageBase{PayloadType: "log"},
        Level: level,
        Message: message,
        Time: time.Now().Format(time.RFC3339),
    }
    return ws.SendJSON(msg)
}

// SendMetricsMessageWS sends a metrics message to the given WebSocket connection.
func (ws *SafeWebSocket) SendMetricsMessageWS(usageCPU, usageMemory float64, uptime uint64, pipelineLatency float64) error {
    msg := MessageMetrics{
        MessageBase: MessageBase{PayloadType: "metrics"},
func NewMessageMetrics(t string, usageCPU, usageMemory float64, uptime uint64, pipelineLatency float64) *MessageMetrics {
    return &MessageMetrics{
        MessageBase: MessageBase{
            Type: t,
        },
        UsageCPU: usageCPU,
        UsageMemory: usageMemory,
        Uptime: uptime,
        PipelineLatency: pipelineLatency,
    }
    return ws.SendJSON(msg)
}

// SendICECandidateMessageWS sends an ICE candidate message to the given WebSocket connection.
func (ws *SafeWebSocket) SendICECandidateMessageWS(candidate webrtc.ICECandidateInit) error {
    msg := MessageICECandidate{
        MessageBase: MessageBase{PayloadType: "ice"},
        Candidate: candidate,
    }
    return ws.SendJSON(msg)
type MessageICE struct {
    MessageBase
    Candidate webrtc.ICECandidateInit `json:"candidate"`
}

// SendSDPMessageWS sends an SDP message to the given WebSocket connection.
func (ws *SafeWebSocket) SendSDPMessageWS(sdp webrtc.SessionDescription) error {
    msg := MessageSDP{
        MessageBase: MessageBase{PayloadType: "sdp"},
        SDP: sdp,
func NewMessageICE(t string, candidate webrtc.ICECandidateInit) *MessageICE {
    return &MessageICE{
        MessageBase: MessageBase{
            Type: t,
        },
        Candidate: candidate,
    }
    return ws.SendJSON(msg)
}

// SendAnswerMessageWS sends an answer message to the given WebSocket connection.
func (ws *SafeWebSocket) SendAnswerMessageWS(answer AnswerType) error {
    msg := MessageAnswer{
        MessageBase: MessageBase{PayloadType: "answer"},
        AnswerType: answer,
    }
    return ws.SendJSON(msg)
type MessageSDP struct {
    MessageBase
    SDP webrtc.SessionDescription `json:"sdp"`
}

func NewMessageSDP(t string, sdp webrtc.SessionDescription) *MessageSDP {
    return &MessageSDP{
        MessageBase: MessageBase{
            Type: t,
        },
        SDP: sdp,
    }
}

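Note that while the Go field was renamed from `PayloadType` to `Type`, the JSON tag is still `payload_type`, so the wire format is unchanged. Receivers can decode `MessageBase` first and then re-decode into the concrete type; a sketch (the `"sdp"` and `"ice"` type strings are illustrative):

```go
func route(raw []byte) {
	var base connections.MessageBase
	if err := json.Unmarshal(raw, &base); err != nil {
		return
	}
	switch base.Type {
	case "sdp":
		var msg connections.MessageSDP
		if json.Unmarshal(raw, &msg) == nil {
			// handle msg.SDP
		}
	case "ice":
		var msg connections.MessageICE
		if json.Unmarshal(raw, &msg) == nil {
			// handle msg.Candidate
		}
	}
}
```

Replacing the `Send*MessageWS` methods with plain constructors decouples the message types from any one transport, which is what lets the same envelopes travel over libp2p streams instead of WebSockets.
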
@@ -1,119 +0,0 @@
package connections

import (
    "github.com/pion/webrtc/v4"
    "google.golang.org/protobuf/proto"
    gen "relay/internal/proto"
)

// SendMeshHandshake sends a handshake message to another relay.
func (ws *SafeWebSocket) SendMeshHandshake(relayID, publicKey string) error {
    msg := &gen.MeshMessage{
        Type: &gen.MeshMessage_Handshake{
            Handshake: &gen.Handshake{
                RelayId: relayID,
                DhPublicKey: publicKey,
            },
        },
    }
    data, err := proto.Marshal(msg)
    if err != nil {
        return err
    }
    return ws.SendBinary(data)
}

// SendMeshHandshakeResponse sends a handshake response to a relay.
func (ws *SafeWebSocket) SendMeshHandshakeResponse(relayID, dhPublicKey string, approvals map[string]string) error {
    msg := &gen.MeshMessage{
        Type: &gen.MeshMessage_HandshakeResponse{
            HandshakeResponse: &gen.HandshakeResponse{
                RelayId: relayID,
                DhPublicKey: dhPublicKey,
                Approvals: approvals,
            },
        },
    }
    data, err := proto.Marshal(msg)
    if err != nil {
        return err
    }
    return ws.SendBinary(data)
}

// SendMeshForwardSDP sends a forwarded SDP message to another relay
func (ws *SafeWebSocket) SendMeshForwardSDP(roomName, participantID string, sdp webrtc.SessionDescription) error {
    msg := &gen.MeshMessage{
        Type: &gen.MeshMessage_ForwardSdp{
            ForwardSdp: &gen.ForwardSDP{
                RoomName: roomName,
                ParticipantId: participantID,
                Sdp: sdp.SDP,
                Type: sdp.Type.String(),
            },
        },
    }
    data, err := proto.Marshal(msg)
    if err != nil {
        return err
    }
    return ws.SendBinary(data)
}

// SendMeshForwardICE sends a forwarded ICE candidate to another relay
func (ws *SafeWebSocket) SendMeshForwardICE(roomName, participantID string, candidate webrtc.ICECandidateInit) error {
    var sdpMLineIndex uint32
    if candidate.SDPMLineIndex != nil {
        sdpMLineIndex = uint32(*candidate.SDPMLineIndex)
    }

    msg := &gen.MeshMessage{
        Type: &gen.MeshMessage_ForwardIce{
            ForwardIce: &gen.ForwardICE{
                RoomName: roomName,
                ParticipantId: participantID,
                Candidate: &gen.ICECandidateInit{
                    Candidate: candidate.Candidate,
                    SdpMid: candidate.SDPMid,
                    SdpMLineIndex: &sdpMLineIndex,
                    UsernameFragment: candidate.UsernameFragment,
                },
            },
        },
    }
    data, err := proto.Marshal(msg)
    if err != nil {
        return err
    }
    return ws.SendBinary(data)
}

func (ws *SafeWebSocket) SendMeshForwardIngest(roomName string) error {
    msg := &gen.MeshMessage{
        Type: &gen.MeshMessage_ForwardIngest{
            ForwardIngest: &gen.ForwardIngest{
                RoomName: roomName,
            },
        },
    }
    data, err := proto.Marshal(msg)
    if err != nil {
        return err
    }
    return ws.SendBinary(data)
}

func (ws *SafeWebSocket) SendMeshStreamRequest(roomName string) error {
    msg := &gen.MeshMessage{
        Type: &gen.MeshMessage_StreamRequest{
            StreamRequest: &gen.StreamRequest{
                RoomName: roomName,
            },
        },
    }
    data, err := proto.Marshal(msg)
    if err != nil {
        return err
    }
    return ws.SendBinary(data)
}
@@ -1,158 +0,0 @@
|
||||
package connections
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/gorilla/websocket"
|
||||
"log/slog"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// OnMessageCallback is a callback for messages of given type
|
||||
type OnMessageCallback func(data []byte)
|
||||
|
||||
// SafeWebSocket is a websocket with a mutex
|
||||
type SafeWebSocket struct {
|
||||
*websocket.Conn
|
||||
sync.Mutex
|
||||
closed bool
|
||||
closeCallback func() // Callback to call on close
|
||||
closeChan chan struct{} // Channel to signal closure
|
||||
callbacks map[string]OnMessageCallback // MessageBase type -> callback
|
||||
binaryCallback OnMessageCallback // Binary message callback
|
||||
sharedSecret []byte
|
||||
}
|
||||
|
||||
// NewSafeWebSocket creates a new SafeWebSocket from *websocket.Conn
|
||||
func NewSafeWebSocket(conn *websocket.Conn) *SafeWebSocket {
	ws := &SafeWebSocket{
		Conn:           conn,
		closed:         false,
		closeCallback:  nil,
		closeChan:      make(chan struct{}),
		callbacks:      make(map[string]OnMessageCallback),
		binaryCallback: nil,
		sharedSecret:   nil,
	}

	// Launch a goroutine to handle messages
	go func() {
		for {
			// Read message
			kind, data, err := ws.Conn.ReadMessage()
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure, websocket.CloseNoStatusReceived) {
				// If unexpected close error, break
				slog.Debug("WebSocket closed unexpectedly", "err", err)
				break
			} else if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseAbnormalClosure, websocket.CloseNoStatusReceived) {
				break
			} else if err != nil {
				slog.Error("Failed reading WebSocket message", "err", err)
				break
			}

			switch kind {
			case websocket.TextMessage:
				// Decode message
				var msg MessageBase
				if err = json.Unmarshal(data, &msg); err != nil {
					slog.Error("Failed decoding WebSocket message", "err", err)
					continue
				}

				// Handle message type callback
				if callback, ok := ws.callbacks[msg.PayloadType]; ok {
					callback(data)
				} // TODO: Log unknown message payload type?
			case websocket.BinaryMessage:
				// Handle binary message callback
				if ws.binaryCallback != nil {
					ws.binaryCallback(data)
				}
			default:
				slog.Warn("Unknown WebSocket message type", "type", kind)
			}
		}

		// Signal closure to callback first
		if ws.closeCallback != nil {
			ws.closeCallback()
		}
		close(ws.closeChan)
		ws.closed = true
	}()

	return ws
}

// SetSharedSecret sets the shared secret for the websocket
func (ws *SafeWebSocket) SetSharedSecret(secret []byte) {
	ws.sharedSecret = secret
}

// GetSharedSecret returns the shared secret for the websocket
func (ws *SafeWebSocket) GetSharedSecret() []byte {
	return ws.sharedSecret
}

// SendJSON writes JSON to a websocket with a mutex
func (ws *SafeWebSocket) SendJSON(v interface{}) error {
	ws.Lock()
	defer ws.Unlock()
	return ws.Conn.WriteJSON(v)
}

// SendBinary writes binary to a websocket with a mutex
func (ws *SafeWebSocket) SendBinary(data []byte) error {
	ws.Lock()
	defer ws.Unlock()
	return ws.Conn.WriteMessage(websocket.BinaryMessage, data)
}

// RegisterMessageCallback sets the callback for text messages of the given type
func (ws *SafeWebSocket) RegisterMessageCallback(msgType string, callback OnMessageCallback) {
	if ws.callbacks == nil {
		ws.callbacks = make(map[string]OnMessageCallback)
	}
	ws.callbacks[msgType] = callback
}

// RegisterBinaryMessageCallback sets the callback for all binary messages
func (ws *SafeWebSocket) RegisterBinaryMessageCallback(callback OnMessageCallback) {
	ws.binaryCallback = callback
}

// UnregisterMessageCallback removes the callback for text messages of the given type
func (ws *SafeWebSocket) UnregisterMessageCallback(msgType string) {
	if ws.callbacks != nil {
		delete(ws.callbacks, msgType)
	}
}

// UnregisterBinaryMessageCallback removes the callback for all binary messages
func (ws *SafeWebSocket) UnregisterBinaryMessageCallback() {
	ws.binaryCallback = nil
}

// RegisterOnClose sets the callback for websocket closing
func (ws *SafeWebSocket) RegisterOnClose(callback func()) {
	ws.closeCallback = func() {
		// Clear our callbacks
		ws.callbacks = nil
		ws.binaryCallback = nil
		// Call the callback
		callback()
	}
}

// Closed returns a channel that closes when the WebSocket connection is terminated
func (ws *SafeWebSocket) Closed() <-chan struct{} {
	return ws.closeChan
}

// IsClosed returns true if the WebSocket connection is closed
func (ws *SafeWebSocket) IsClosed() bool {
	return ws.closed
}
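For context, a minimal usage sketch of this wrapper. The `NewSafeWebSocket` constructor name and the message shapes are assumed from the surrounding code (the constructor body above is its tail); `conn` is a gorilla/websocket connection:

	sws := NewSafeWebSocket(conn) // constructor assumed, defined above this excerpt
	sws.RegisterMessageCallback("answer", func(data []byte) {
		slog.Info("Got answer payload", "len", len(data))
	})
	sws.RegisterOnClose(func() {
		slog.Info("WebSocket closed")
	})
	if err := sws.SendJSON(map[string]string{"payload_type": "hello"}); err != nil {
		slog.Error("Failed to send hello", "err", err)
	}
	<-sws.Closed() // block until the read goroutine signals closure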
13  packages/relay/internal/core/consts.go  Normal file
@@ -0,0 +1,13 @@
package core

import "time"

// --- Constants ---
const (
	// PubSub Topics
	roomStateTopicName    = "room-states"
	relayMetricsTopicName = "relay-metrics"

	// Timers and Intervals
	metricsPublishInterval = 15 * time.Second // How often to publish own metrics
)
214  packages/relay/internal/core/core.go  Normal file
@@ -0,0 +1,214 @@
package core

import (
	"context"
	"crypto/ed25519"
	"fmt"
	"log/slog"
	"os"
	"relay/internal/common"
	"relay/internal/shared"
	"time"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
	"github.com/libp2p/go-libp2p/p2p/security/noise"
	"github.com/libp2p/go-libp2p/p2p/transport/tcp"
	ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
	"github.com/multiformats/go-multiaddr"
	"github.com/oklog/ulid/v2"
	"github.com/pion/webrtc/v4"
)

// -- Variables --

var globalRelay *Relay

// -- Structs --

// RelayInfo contains light information about a Relay, in a mesh-friendly format
type RelayInfo struct {
	ID            peer.ID
	MeshAddrs     []string                                 // Addresses of this relay
	MeshRooms     *common.SafeMap[string, shared.RoomInfo] // Rooms hosted by this relay
	MeshLatencies *common.SafeMap[string, time.Duration]   // Latencies to other peers from this relay
}

// Relay structure enhanced with metrics and state
type Relay struct {
	RelayInfo

	Host        host.Host      // libp2p host for peer-to-peer networking
	PubSub      *pubsub.PubSub // PubSub for state synchronization
	PingService *ping.PingService

	// Local
	LocalRooms           *common.SafeMap[ulid.ULID, *shared.Room]         // room ID -> local Room struct (hosted by this relay)
	LocalMeshPeers       *common.SafeMap[peer.ID, *RelayInfo]             // peer ID -> mesh peer relay info (connected to this relay)
	LocalMeshConnections *common.SafeMap[peer.ID, *webrtc.PeerConnection] // peer ID -> PeerConnection (connected to this relay)

	// Protocols
	ProtocolRegistry

	// PubSub Topics
	pubTopicState        *pubsub.Topic // topic for room states
	pubTopicRelayMetrics *pubsub.Topic // topic for relay metrics/status
}

func NewRelay(ctx context.Context, port int, identityKey crypto.PrivKey) (*Relay, error) {
	listenAddrs := []string{
		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port),    // IPv4 - Raw TCP
		fmt.Sprintf("/ip6/::/tcp/%d", port),         // IPv6 - Raw TCP
		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/ws", port), // IPv4 - TCP WebSocket
		fmt.Sprintf("/ip6/::/tcp/%d/ws", port),      // IPv6 - TCP WebSocket
	}

	var muAddrs []multiaddr.Multiaddr
	for _, addr := range listenAddrs {
		multiAddr, err := multiaddr.NewMultiaddr(addr)
		if err != nil {
			return nil, fmt.Errorf("failed to parse multiaddr '%s': %w", addr, err)
		}
		muAddrs = append(muAddrs, multiAddr)
	}

	// Initialize libp2p host
	p2pHost, err := libp2p.New(
		// TODO: Currently static identity
		libp2p.Identity(identityKey),
		// Enable required transports
		libp2p.Transport(tcp.NewTCPTransport),
		libp2p.Transport(ws.New),
		// Other options
		libp2p.ListenAddrs(muAddrs...),
		libp2p.Security(noise.ID, noise.New),
		libp2p.EnableRelay(),
		libp2p.EnableHolePunching(),
		libp2p.EnableNATService(),
		libp2p.EnableAutoNATv2(),
		libp2p.ShareTCPListener(),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create libp2p host for relay: %w", err)
	}

	// Set up pubsub
	p2pPubsub, err := pubsub.NewGossipSub(ctx, p2pHost)
	if err != nil {
		return nil, fmt.Errorf("failed to create pubsub: %w, addrs: %v", err, p2pHost.Addrs())
	}

	// Initialize Ping Service
	pingSvc := ping.NewPingService(p2pHost)

	var addresses []string
	for _, addr := range p2pHost.Addrs() {
		addresses = append(addresses, addr.String())
	}

	r := &Relay{
		RelayInfo: RelayInfo{
			ID:            p2pHost.ID(),
			MeshAddrs:     addresses,
			MeshRooms:     common.NewSafeMap[string, shared.RoomInfo](),
			MeshLatencies: common.NewSafeMap[string, time.Duration](),
		},
		Host:                 p2pHost,
		PubSub:               p2pPubsub,
		PingService:          pingSvc,
		LocalRooms:           common.NewSafeMap[ulid.ULID, *shared.Room](),
		LocalMeshPeers:       common.NewSafeMap[peer.ID, *RelayInfo](),
		LocalMeshConnections: common.NewSafeMap[peer.ID, *webrtc.PeerConnection](),
	}

	// Add network notifier after relay is initialized
	p2pHost.Network().Notify(&networkNotifier{relay: r})

	// Set up PubSub topics and handlers
	if err = r.setupPubSub(ctx); err != nil {
		if closeErr := p2pHost.Close(); closeErr != nil {
			slog.Error("Failed to close host after PubSub setup failure", "err", closeErr)
		}
		return nil, fmt.Errorf("failed to setup PubSub: %w", err)
	}

	// Initialize Protocol Registry
	r.ProtocolRegistry = NewProtocolRegistry(r)

	// Start discovery features
	if err = startMDNSDiscovery(r); err != nil {
		slog.Warn("Failed to initialize mDNS discovery, continuing without..", "error", err)
	}

	// Start background tasks
	go r.periodicMetricsPublisher(ctx)

	printConnectInstructions(p2pHost)

	return r, nil
}

func InitRelay(ctx context.Context, ctxCancel context.CancelFunc) error {
	var err error
	persistentDir := common.GetFlags().PersistDir

	// Load or generate identity key
	var identityKey crypto.PrivKey
	var privKey ed25519.PrivateKey
	// First check whether we need to generate an identity
	hasIdentity := len(persistentDir) > 0 && !common.GetFlags().RegenIdentity
	if hasIdentity {
		_, err = os.Stat(persistentDir + "/identity.key")
		if err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("failed to check identity key file: %w", err)
		} else if os.IsNotExist(err) {
			hasIdentity = false
		}
	}
	if !hasIdentity {
		// Make sure the persistent directory exists
		if err = os.MkdirAll(persistentDir, 0700); err != nil {
			return fmt.Errorf("failed to create persistent data directory: %w", err)
		}
		// Generate
		slog.Info("Generating new identity for relay")
		privKey, err = common.GenerateED25519Key()
		if err != nil {
			return fmt.Errorf("failed to generate new identity: %w", err)
		}
		// Save the key
		if err = common.SaveED25519Key(privKey, persistentDir+"/identity.key"); err != nil {
			return fmt.Errorf("failed to save identity key: %w", err)
		}
		slog.Info("New identity generated and saved", "path", persistentDir+"/identity.key")
	} else {
		slog.Info("Loading existing identity for relay", "path", persistentDir+"/identity.key")
		// Load the key
		privKey, err = common.LoadED25519Key(persistentDir + "/identity.key")
		if err != nil {
			return fmt.Errorf("failed to load identity key: %w", err)
		}
	}

	// Convert to libp2p crypto.PrivKey
	identityKey, err = crypto.UnmarshalEd25519PrivateKey(privKey)
	if err != nil {
		return fmt.Errorf("failed to unmarshal ED25519 private key: %w", err)
	}

	globalRelay, err = NewRelay(ctx, common.GetFlags().EndpointPort, identityKey)
	if err != nil {
		return fmt.Errorf("failed to create relay: %w", err)
	}

	if err = common.InitWebRTCAPI(); err != nil {
		return err
	}

	slog.Info("Relay initialized", "id", globalRelay.ID)
	return nil
}
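For orientation, a minimal sketch of how a caller would bootstrap the relay (assuming a main package that owns the root context; `InitRelay` and the flag handling are as defined above, the shutdown wiring is illustrative):

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if err := core.InitRelay(ctx, cancel); err != nil {
		slog.Error("Relay failed to start", "err", err)
		os.Exit(1)
	}
	<-ctx.Done() // run until the context is canceled, e.g. by a signal handler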
38  packages/relay/internal/core/mdns.go  Normal file
@@ -0,0 +1,38 @@
package core

import (
	"context"
	"fmt"
	"log/slog"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/discovery/mdns"
)

const (
	mdnsDiscoveryRendezvous = "/nestri-relay/mdns-discovery/1.0.0" // Shared string for mDNS discovery
)

type discoveryNotifee struct {
	relay *Relay
}

func (d *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
	if d.relay != nil {
		if err := d.relay.connectToRelay(context.Background(), &pi); err != nil {
			slog.Error("failed to connect to discovered relay", "peer", pi.ID, "error", err)
		}
	}
}

func startMDNSDiscovery(relay *Relay) error {
	d := &discoveryNotifee{
		relay: relay,
	}

	service := mdns.NewMdnsService(relay.Host, mdnsDiscoveryRendezvous, d)
	if err := service.Start(); err != nil {
		return fmt.Errorf("failed to start mDNS discovery: %w", err)
	}
	return nil
}
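Note that mDNS only discovers peers on the same local network segment, and both relays must advertise the identical rendezvous string (`/nestri-relay/mdns-discovery/1.0.0`) to find each other; relays separated by a WAN are joined explicitly through `ConnectToRelay` with a full multiaddress (see p2p.go below).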
128  packages/relay/internal/core/metrics.go  Normal file
@@ -0,0 +1,128 @@
package core

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// --- Metrics Collection and Publishing ---

// periodicMetricsPublisher periodically gathers local metrics and publishes them.
func (r *Relay) periodicMetricsPublisher(ctx context.Context) {
	ticker := time.NewTicker(metricsPublishInterval)
	defer ticker.Stop()

	// Publish immediately on start
	if err := r.publishRelayMetrics(ctx); err != nil {
		slog.Error("Failed to publish initial relay metrics", "err", err)
	}

	for {
		select {
		case <-ctx.Done():
			slog.Info("Stopping metrics publisher")
			return
		case <-ticker.C:
			if err := r.publishRelayMetrics(ctx); err != nil {
				slog.Error("Failed to publish relay metrics", "err", err)
			}
		}
	}
}

// publishRelayMetrics sends the current relay status to the mesh.
func (r *Relay) publishRelayMetrics(ctx context.Context) error {
	if r.pubTopicRelayMetrics == nil {
		slog.Warn("Cannot publish relay metrics: topic is nil")
		return nil
	}

	// Check all peer latencies
	r.checkAllPeerLatencies(ctx)

	data, err := json.Marshal(r.RelayInfo)
	if err != nil {
		return fmt.Errorf("failed to marshal relay status: %w", err)
	}

	if pubErr := r.pubTopicRelayMetrics.Publish(ctx, data); pubErr != nil {
		// Don't return an error on publish failure, just log it
		slog.Error("Failed to publish relay metrics message", "err", pubErr)
	}
	return nil
}

// checkAllPeerLatencies measures latency to all currently connected peers.
func (r *Relay) checkAllPeerLatencies(ctx context.Context) {
	var wg sync.WaitGroup
	for _, p := range r.Host.Network().Peers() {
		if p == r.ID {
			continue // Skip self
		}
		wg.Add(1)
		// Run checks concurrently
		go func(peerID peer.ID) {
			defer wg.Done()
			// Run the measurement directly; a nested goroutine here would
			// signal Done before the measurement actually finished
			r.measureLatencyToPeer(ctx, peerID)
		}(p)
	}
	wg.Wait() // Wait for all latency checks to complete
}

// measureLatencyToPeer pings a specific peer and updates the local latency map.
func (r *Relay) measureLatencyToPeer(ctx context.Context, peerID peer.ID) {
	// Check peer status first
	if !r.hasConnectedPeer(peerID) {
		return
	}

	// Create a cancelable context for the ping operation
	pingCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Use the PingService instance stored in the Relay struct
	if r.PingService == nil {
		slog.Error("PingService is nil, cannot measure latency", "peer", peerID)
		return
	}
	resultsCh := r.PingService.Ping(pingCtx, peerID)

	// Wait for the result (or cancellation)
	select {
	case <-pingCtx.Done():
		// Ping canceled before a result arrived
		slog.Warn("Latency check canceled", "peer", peerID, "err", pingCtx.Err())
	case result, ok := <-resultsCh:
		if !ok {
			// Channel closed unexpectedly
			slog.Warn("Ping service channel closed unexpectedly", "peer", peerID)
			return
		}

		// Received ping result
		if result.Error != nil {
			slog.Warn("Latency check failed, removing peer from local peers map", "peer", peerID, "err", result.Error)
			// Remove from MeshPeers if ping failed
			if r.LocalMeshPeers.Has(peerID) {
				r.LocalMeshPeers.Delete(peerID)
			}
			return
		}

		// Ping successful, update latency
		latency := result.RTT
		// Ensure latency is not zero if successful, assign a minimal value if so.
		// Sometimes RTT can be reported as 0 for very fast local connections.
		if latency <= 0 {
			latency = 1 * time.Microsecond
		}

		r.RelayInfo.MeshLatencies.Set(peerID.String(), latency)
	}
}
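Once a few publish cycles have run, other components can read the measured latencies straight off the embedded RelayInfo — a minimal sketch, assuming `r` is the `*Relay` and SafeMap's `Range` behaves as it is used elsewhere in this package:

	r.MeshLatencies.Range(func(peerID string, rtt time.Duration) bool {
		slog.Info("Mesh latency", "peer", peerID, "rtt", rtt)
		return true // keep iterating
	})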
128  packages/relay/internal/core/p2p.go  Normal file
@@ -0,0 +1,128 @@
package core

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)

// --- Structs ---

// networkNotifier logs connection events and updates relay state
type networkNotifier struct {
	relay *Relay
}

// Connected is called when a connection is established
func (n *networkNotifier) Connected(net network.Network, conn network.Conn) {
	// Update the status of the newly connected peer
	if n.relay != nil {
		n.relay.onPeerConnected(conn.RemotePeer())
	}
}

// Disconnected is called when a connection is terminated
func (n *networkNotifier) Disconnected(net network.Network, conn network.Conn) {
	// Update the status of the disconnected peer
	if n.relay != nil {
		n.relay.onPeerDisconnected(conn.RemotePeer())
	}
}

// Listen is called when the node starts listening on an address
func (n *networkNotifier) Listen(net network.Network, addr multiaddr.Multiaddr) {}

// ListenClose is called when the node stops listening on an address
func (n *networkNotifier) ListenClose(net network.Network, addr multiaddr.Multiaddr) {}

// --- PubSub Setup ---

// setupPubSub initializes PubSub topics and subscriptions.
func (r *Relay) setupPubSub(ctx context.Context) error {
	var err error

	// Room State Topic
	r.pubTopicState, err = r.PubSub.Join(roomStateTopicName)
	if err != nil {
		return fmt.Errorf("failed to join room state topic '%s': %w", roomStateTopicName, err)
	}
	stateSub, err := r.pubTopicState.Subscribe()
	if err != nil {
		return fmt.Errorf("failed to subscribe to room state topic '%s': %w", roomStateTopicName, err)
	}
	go r.handleRoomStateMessages(ctx, stateSub) // Handler in state.go

	// Relay Metrics Topic
	r.pubTopicRelayMetrics, err = r.PubSub.Join(relayMetricsTopicName)
	if err != nil {
		return fmt.Errorf("failed to join relay metrics topic '%s': %w", relayMetricsTopicName, err)
	}
	metricsSub, err := r.pubTopicRelayMetrics.Subscribe()
	if err != nil {
		return fmt.Errorf("failed to subscribe to relay metrics topic '%s': %w", relayMetricsTopicName, err)
	}
	go r.handleRelayMetricsMessages(ctx, metricsSub) // Handler in state.go

	slog.Info("PubSub topics joined and subscriptions started")
	return nil
}

// --- Connection Management ---

// connectToRelay is the internal method to connect to a relay peer using multiaddresses
func (r *Relay) connectToRelay(ctx context.Context, peerInfo *peer.AddrInfo) error {
	if peerInfo.ID == r.ID {
		return errors.New("cannot connect to self")
	}

	// Use a timeout for the connection attempt
	connectCtx, cancel := context.WithTimeout(ctx, 15*time.Second) // 15s timeout
	defer cancel()

	slog.Info("Attempting to connect to peer", "peer", peerInfo.ID, "addrs", peerInfo.Addrs)
	if err := r.Host.Connect(connectCtx, *peerInfo); err != nil {
		return fmt.Errorf("failed to connect to %s: %w", peerInfo.ID, err)
	}

	slog.Info("Successfully connected to peer", "peer", peerInfo.ID, "addrs", peerInfo.Addrs)
	return nil
}

// ConnectToRelay connects to another relay by its multiaddress.
func (r *Relay) ConnectToRelay(ctx context.Context, addr string) error {
	ma, err := multiaddr.NewMultiaddr(addr)
	if err != nil {
		return fmt.Errorf("invalid multiaddress: %w", err)
	}

	peerInfo, err := peer.AddrInfoFromP2pAddr(ma)
	if err != nil {
		return fmt.Errorf("failed to extract peer info: %w", err)
	}

	return r.connectToRelay(ctx, peerInfo)
}

// printConnectInstructions logs the multiaddresses for connecting to this relay.
func printConnectInstructions(p2pHost host.Host) {
	peerInfo := peer.AddrInfo{
		ID:    p2pHost.ID(),
		Addrs: p2pHost.Addrs(),
	}
	addrs, err := peer.AddrInfoToP2pAddrs(&peerInfo)
	if err != nil {
		slog.Error("Failed to convert peer info to addresses", "err", err)
		return
	}

	slog.Info("Mesh connection addresses:")
	for _, addr := range addrs {
		slog.Info(fmt.Sprintf("> %s", addr.String()))
	}
}
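To join two relays across networks, take one of the multiaddresses printed by `printConnectInstructions` on one side and pass it to `ConnectToRelay` on the other — a minimal sketch (the address below is purely illustrative):

	// Hypothetical peer multiaddress; use one printed by the other relay
	addr := "/ip4/203.0.113.7/tcp/8088/ws/p2p/12D3KooW..."
	if err := r.ConnectToRelay(context.Background(), addr); err != nil {
		slog.Error("Failed to join mesh", "addr", addr, "err", err)
	}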
694  packages/relay/internal/core/protocol_stream.go  Normal file
@@ -0,0 +1,694 @@
package core

import (
	"bufio"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"relay/internal/common"
	"relay/internal/connections"
	"relay/internal/shared"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pion/rtp"
	"github.com/pion/webrtc/v4"
)

// TODOs:
// TODO: Disconnecting with a stream open causes a crash on the requester
// TODO: Need to trigger a stream request if the remote room is online and there are local participants waiting
// TODO: Clean up local room state when the stream is closed upstream

// --- Protocol IDs ---
const (
	protocolStreamRequest = "/nestri-relay/stream-request/1.0.0" // For requesting a stream from a relay
	protocolStreamPush    = "/nestri-relay/stream-push/1.0.0"    // For pushing a stream to a relay
)

// --- Protocol Types ---

// StreamConnection is a connection between two relays for the stream protocol
type StreamConnection struct {
	pc  *webrtc.PeerConnection
	ndc *connections.NestriDataChannel
}

// StreamProtocol deals with meshed stream forwarding
type StreamProtocol struct {
	relay          *Relay
	servedConns    *common.SafeMap[peer.ID, *StreamConnection] // peer ID -> StreamConnection (for served streams)
	incomingConns  *common.SafeMap[string, *StreamConnection]  // room name -> StreamConnection (for incoming pushed streams)
	requestedConns *common.SafeMap[string, *StreamConnection]  // room name -> StreamConnection (for streams requested from other relays)
}

func NewStreamProtocol(relay *Relay) *StreamProtocol {
	protocol := &StreamProtocol{
		relay:          relay,
		servedConns:    common.NewSafeMap[peer.ID, *StreamConnection](),
		incomingConns:  common.NewSafeMap[string, *StreamConnection](),
		requestedConns: common.NewSafeMap[string, *StreamConnection](),
	}

	protocol.relay.Host.SetStreamHandler(protocolStreamRequest, protocol.handleStreamRequest)
	protocol.relay.Host.SetStreamHandler(protocolStreamPush, protocol.handleStreamPush)

	return protocol
}

// --- Protocol Stream Handlers ---

// handleStreamRequest manages a request from another relay for a stream hosted locally
func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
	brw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
	safeBRW := common.NewSafeBufioRW(brw)

	iceHolder := make([]webrtc.ICECandidateInit, 0)
	for {
		data, err := safeBRW.Receive()
		if err != nil {
			if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) {
				slog.Debug("Stream request connection closed by peer", "peer", stream.Conn().RemotePeer())
				return
			}

			slog.Error("Failed to receive data", "err", err)
			_ = stream.Reset()

			return
		}

		var baseMsg connections.MessageBase
		if err = json.Unmarshal(data, &baseMsg); err != nil {
			slog.Error("Failed to unmarshal base message", "err", err)
			continue
		}

		switch baseMsg.Type {
		case "request-stream-room":
			var rawMsg connections.MessageRaw
			if err = json.Unmarshal(data, &rawMsg); err != nil {
				slog.Error("Failed to unmarshal raw message for room stream request", "err", err)
				continue
			}

			var roomName string
			if err = json.Unmarshal(rawMsg.Data, &roomName); err != nil {
				slog.Error("Failed to unmarshal room name from raw message", "err", err)
				continue
			}

			slog.Info("Received stream request for room", "room", roomName)
			room := sp.relay.GetRoomByName(roomName)
			if room == nil || !room.IsOnline() || room.OwnerID != sp.relay.ID {
				// TODO: Allow forwarding requests to other relays from here?
				slog.Debug("Cannot provide stream for nil, offline or non-owned room", "room", roomName, "is_online", room != nil && room.IsOnline(), "is_owner", room != nil && room.OwnerID == sp.relay.ID)
				// Respond with a "request-stream-offline" message carrying the room name
				// TODO: Store the peer and send an "online" message when the room comes online
				roomNameData, err := json.Marshal(roomName)
				if err != nil {
					slog.Error("Failed to marshal room name for request stream offline", "room", roomName, "err", err)
					continue
				}
				if err = safeBRW.SendJSON(connections.NewMessageRaw(
					"request-stream-offline",
					roomNameData,
				)); err != nil {
					slog.Error("Failed to send request stream offline message", "room", roomName, "err", err)
				}
				continue
			}

			pc, err := common.CreatePeerConnection(func() {
				slog.Info("PeerConnection closed for requested stream", "room", roomName)
				// Clean up the stream connection
				if ok := sp.servedConns.Has(stream.Conn().RemotePeer()); ok {
					sp.servedConns.Delete(stream.Conn().RemotePeer())
				}
			})
			if err != nil {
				slog.Error("Failed to create PeerConnection for requested stream", "room", roomName, "err", err)
				continue
			}

			// Add tracks
			if room.AudioTrack != nil {
				if _, err = pc.AddTrack(room.AudioTrack); err != nil {
					slog.Error("Failed to add audio track for requested stream", "room", roomName, "err", err)
					continue
				}
			}
			if room.VideoTrack != nil {
				if _, err = pc.AddTrack(room.VideoTrack); err != nil {
					slog.Error("Failed to add video track for requested stream", "room", roomName, "err", err)
					continue
				}
			}

			// DataChannel setup
			settingOrdered := true
			settingMaxRetransmits := uint16(2)
			dc, err := pc.CreateDataChannel("relay-data", &webrtc.DataChannelInit{
				Ordered:        &settingOrdered,
				MaxRetransmits: &settingMaxRetransmits,
			})
			if err != nil {
				slog.Error("Failed to create DataChannel for requested stream", "room", roomName, "err", err)
				continue
			}
			ndc := connections.NewNestriDataChannel(dc)

			ndc.RegisterOnOpen(func() {
				slog.Debug("Relay DataChannel opened for requested stream", "room", roomName)
			})
			ndc.RegisterOnClose(func() {
				slog.Debug("Relay DataChannel closed for requested stream", "room", roomName)
			})
			ndc.RegisterMessageCallback("input", func(data []byte) {
				if room.DataChannel != nil {
					if err = room.DataChannel.SendBinary(data); err != nil {
						slog.Error("Failed to forward input message from mesh to upstream room", "room", roomName, "err", err)
					}
				}
			})

			// ICE candidate handling
			pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
				if candidate == nil {
					return
				}

				if err = safeBRW.SendJSON(connections.NewMessageICE("ice-candidate", candidate.ToJSON())); err != nil {
					slog.Error("Failed to send ICE candidate message for requested stream", "room", roomName, "err", err)
					return
				}
			})

			// Create offer
			offer, err := pc.CreateOffer(nil)
			if err != nil {
				slog.Error("Failed to create offer for requested stream", "room", roomName, "err", err)
				continue
			}
			if err = pc.SetLocalDescription(offer); err != nil {
				slog.Error("Failed to set local description for requested stream", "room", roomName, "err", err)
				continue
			}
			if err = safeBRW.SendJSON(connections.NewMessageSDP("offer", offer)); err != nil {
				slog.Error("Failed to send offer for requested stream", "room", roomName, "err", err)
				continue
			}

			// Store the connection
			sp.servedConns.Set(stream.Conn().RemotePeer(), &StreamConnection{
				pc:  pc,
				ndc: ndc,
			})

			slog.Debug("Sent offer for requested stream")
		case "ice-candidate":
			var iceMsg connections.MessageICE
			if err := json.Unmarshal(data, &iceMsg); err != nil {
				slog.Error("Failed to unmarshal ICE message", "err", err)
				continue
			}
			if conn, ok := sp.servedConns.Get(stream.Conn().RemotePeer()); ok && conn.pc.RemoteDescription() != nil {
				if err := conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
					slog.Error("Failed to add ICE candidate", "err", err)
				}
				for _, heldIce := range iceHolder {
					if err := conn.pc.AddICECandidate(heldIce); err != nil {
						slog.Error("Failed to add held ICE candidate", "err", err)
					}
				}
				// Clear the held candidates
				iceHolder = make([]webrtc.ICECandidateInit, 0)
			} else {
				// Hold the candidate until the remote description is set
				iceHolder = append(iceHolder, iceMsg.Candidate)
			}
		case "answer":
			var answerMsg connections.MessageSDP
			if err := json.Unmarshal(data, &answerMsg); err != nil {
				slog.Error("Failed to unmarshal answer from signaling message", "err", err)
				continue
			}
			if conn, ok := sp.servedConns.Get(stream.Conn().RemotePeer()); ok {
				if err := conn.pc.SetRemoteDescription(answerMsg.SDP); err != nil {
					slog.Error("Failed to set remote description for answer", "err", err)
					continue
				}
				slog.Debug("Set remote description for answer")
			} else {
				slog.Warn("Received answer without active PeerConnection")
			}
		}
	}
}

// requestStream manages the internals of the stream request
func (sp *StreamProtocol) requestStream(stream network.Stream, room *shared.Room) error {
	brw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
	safeBRW := common.NewSafeBufioRW(brw)

	slog.Debug("Requesting room stream from peer", "room", room.Name, "peer", stream.Conn().RemotePeer())

	// Send the room name to the remote peer
	roomData, err := json.Marshal(room.Name)
	if err != nil {
		_ = stream.Close()
		return fmt.Errorf("failed to marshal room name: %w", err)
	}
	if err = safeBRW.SendJSON(connections.NewMessageRaw(
		"request-stream-room",
		roomData,
	)); err != nil {
		_ = stream.Close()
		return fmt.Errorf("failed to send room request: %w", err)
	}

	pc, err := common.CreatePeerConnection(func() {
		slog.Info("Relay PeerConnection closed for requested stream", "room", room.Name)
		_ = stream.Close() // ignore error, may be closed already
		// Clean up the stream connection
		if ok := sp.requestedConns.Has(room.Name); ok {
			sp.requestedConns.Delete(room.Name)
		}
	})
	if err != nil {
		_ = stream.Close()
		return fmt.Errorf("failed to create PeerConnection: %w", err)
	}

	pc.OnTrack(func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver) {
		localTrack, err := webrtc.NewTrackLocalStaticRTP(track.Codec().RTPCodecCapability, track.ID(), "relay-"+room.Name+"-"+track.Kind().String())
		if err != nil {
			slog.Error("Failed to create local track for requested stream", "room", room.Name, "err", err)
			return
		}
		slog.Debug("Received track for requested stream", "room", room.Name, "track_kind", track.Kind().String())

		room.SetTrack(track.Kind(), localTrack)

		go func() {
			for {
				rtpPacket, _, err := track.ReadRTP()
				if err != nil {
					if !errors.Is(err, io.EOF) {
						slog.Error("Failed to read RTP packet for requested stream room", "room", room.Name, "err", err)
					}
					break
				}

				err = localTrack.WriteRTP(rtpPacket)
				if err != nil && !errors.Is(err, io.ErrClosedPipe) {
					slog.Error("Failed to write RTP to local track for requested stream room", "room", room.Name, "err", err)
					break
				}
			}
		}()
	})

	pc.OnDataChannel(func(dc *webrtc.DataChannel) {
		ndc := connections.NewNestriDataChannel(dc)
		ndc.RegisterOnOpen(func() {
			slog.Debug("Relay DataChannel opened for requested stream", "room", room.Name)
		})
		ndc.RegisterOnClose(func() {
			slog.Debug("Relay DataChannel closed for requested stream", "room", room.Name)
		})

		// Set the DataChannel in the requestedConns map
		if conn, ok := sp.requestedConns.Get(room.Name); ok {
			conn.ndc = ndc
		} else {
			sp.requestedConns.Set(room.Name, &StreamConnection{
				pc:  pc,
				ndc: ndc,
			})
		}

		// We do not handle any messages from upstream here
	})

	pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
		if candidate == nil {
			return
		}

		if err = safeBRW.SendJSON(connections.NewMessageICE(
			"ice-candidate",
			candidate.ToJSON(),
		)); err != nil {
			slog.Error("Failed to send ICE candidate message for requested stream", "room", room.Name, "err", err)
			return
		}
	})

	// Handle incoming messages (offer and candidates)
	go func() {
		iceHolder := make([]webrtc.ICECandidateInit, 0)

		for {
			data, err := safeBRW.Receive()
			if err != nil {
				if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) {
					slog.Debug("Connection for requested stream closed by peer", "room", room.Name)
					return
				}

				slog.Error("Failed to receive data for requested stream", "room", room.Name, "err", err)
				_ = stream.Reset()

				return
			}

			var baseMsg connections.MessageBase
			if err = json.Unmarshal(data, &baseMsg); err != nil {
				slog.Error("Failed to unmarshal base message for requested stream", "room", room.Name, "err", err)
				return
			}

			switch baseMsg.Type {
			case "ice-candidate":
				var iceMsg connections.MessageICE
				if err = json.Unmarshal(data, &iceMsg); err != nil {
					slog.Error("Failed to unmarshal ICE candidate for requested stream", "room", room.Name, "err", err)
					continue
				}
				if conn, ok := sp.requestedConns.Get(room.Name); ok && conn.pc.RemoteDescription() != nil {
					if err = conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
						slog.Error("Failed to add ICE candidate for requested stream", "room", room.Name, "err", err)
					}
					// Add held candidates
					for _, heldCandidate := range iceHolder {
						if err = conn.pc.AddICECandidate(heldCandidate); err != nil {
							slog.Error("Failed to add held ICE candidate for requested stream", "room", room.Name, "err", err)
						}
					}
					// Clear the held candidates
					iceHolder = make([]webrtc.ICECandidateInit, 0)
				} else {
					// Hold the candidate until the remote description is set
					iceHolder = append(iceHolder, iceMsg.Candidate)
				}
			case "offer":
				var offerMsg connections.MessageSDP
				if err = json.Unmarshal(data, &offerMsg); err != nil {
					slog.Error("Failed to unmarshal offer for requested stream", "room", room.Name, "err", err)
					continue
				}
				if err = pc.SetRemoteDescription(offerMsg.SDP); err != nil {
					slog.Error("Failed to set remote description for requested stream", "room", room.Name, "err", err)
					continue
				}
				answer, err := pc.CreateAnswer(nil)
				if err != nil {
					slog.Error("Failed to create answer for requested stream", "room", room.Name, "err", err)
					if err = stream.Reset(); err != nil {
						slog.Error("Failed to reset stream for requested stream", "err", err)
					}
					return
				}
				if err = pc.SetLocalDescription(answer); err != nil {
					slog.Error("Failed to set local description for requested stream", "room", room.Name, "err", err)
					if err = stream.Reset(); err != nil {
						slog.Error("Failed to reset stream for requested stream", "err", err)
					}
					return
				}
				if err = safeBRW.SendJSON(connections.NewMessageSDP(
					"answer",
					answer,
				)); err != nil {
					slog.Error("Failed to send answer for requested stream", "room", room.Name, "err", err)
					continue
				}

				// Store the connection
				sp.requestedConns.Set(room.Name, &StreamConnection{
					pc:  pc,
					ndc: nil,
				})

				slog.Debug("Sent answer for requested stream", "room", room.Name)
			default:
				slog.Warn("Unknown signaling message type", "room", room.Name, "type", baseMsg.Type)
			}
		}
	}()

	return nil
}

// handleStreamPush manages a stream push from a node (nestri-server)
func (sp *StreamProtocol) handleStreamPush(stream network.Stream) {
	brw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
	safeBRW := common.NewSafeBufioRW(brw)

	var room *shared.Room
	iceHolder := make([]webrtc.ICECandidateInit, 0)
	for {
		data, err := safeBRW.Receive()
		if err != nil {
			if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) {
				slog.Debug("Stream push connection closed by peer", "peer", stream.Conn().RemotePeer())
				return
			}

			slog.Error("Failed to receive data for stream push", "err", err)
			_ = stream.Reset()

			return
		}

		var baseMsg connections.MessageBase
		if err = json.Unmarshal(data, &baseMsg); err != nil {
			slog.Error("Failed to unmarshal base message for stream push", "err", err)
			continue
		}

		switch baseMsg.Type {
		case "push-stream-room":
			var rawMsg connections.MessageRaw
			if err = json.Unmarshal(data, &rawMsg); err != nil {
				slog.Error("Failed to unmarshal raw message for stream push", "err", err)
				continue
			}

			var roomName string
			if err = json.Unmarshal(rawMsg.Data, &roomName); err != nil {
				slog.Error("Failed to unmarshal room name from raw message", "err", err)
				continue
			}

			slog.Info("Received stream push request for room", "room", roomName)

			room = sp.relay.GetRoomByName(roomName)
			if room != nil {
				if room.OwnerID != sp.relay.ID {
					slog.Error("Cannot push a stream to non-owned room", "room", room.Name, "owner_id", room.OwnerID)
					continue
				}
				if room.IsOnline() {
					slog.Error("Cannot push a stream to already online room", "room", room.Name)
					continue
				}
			} else {
				// Create a new room if it doesn't exist
				room = sp.relay.CreateRoom(roomName)
			}

			// Respond with an OK carrying the room name
			roomData, err := json.Marshal(room.Name)
			if err != nil {
				slog.Error("Failed to marshal room name for push stream response", "err", err)
				continue
			}
			if err = safeBRW.SendJSON(connections.NewMessageRaw(
				"push-stream-ok",
				roomData,
			)); err != nil {
				slog.Error("Failed to send push stream OK response", "room", room.Name, "err", err)
				continue
			}
		case "ice-candidate":
			// Candidates are only valid once "push-stream-room" has set the room
			if room == nil {
				slog.Error("Received ICE candidate without room set for stream push")
				continue
			}

			var iceMsg connections.MessageICE
			if err = json.Unmarshal(data, &iceMsg); err != nil {
				slog.Error("Failed to unmarshal ICE candidate for stream push", "err", err)
				continue
			}
			if conn, ok := sp.incomingConns.Get(room.Name); ok && conn.pc.RemoteDescription() != nil {
				if err = conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
					slog.Error("Failed to add ICE candidate for pushed stream", "err", err)
				}
				for _, heldIce := range iceHolder {
					if err := conn.pc.AddICECandidate(heldIce); err != nil {
						slog.Error("Failed to add held ICE candidate for pushed stream", "err", err)
					}
				}
				// Clear the held candidates
				iceHolder = make([]webrtc.ICECandidateInit, 0)
			} else {
				// Hold the candidate until the remote description is set
				iceHolder = append(iceHolder, iceMsg.Candidate)
			}
		case "offer":
			// Make sure we have a room set to push to (set by "push-stream-room")
			if room == nil {
				slog.Error("Received offer without room set for stream push")
				continue
			}

			var offerMsg connections.MessageSDP
			if err = json.Unmarshal(data, &offerMsg); err != nil {
				slog.Error("Failed to unmarshal offer for stream push", "err", err)
				continue
			}

			// Create a PeerConnection for the incoming stream
			pc, err := common.CreatePeerConnection(func() {
				slog.Info("PeerConnection closed for pushed stream", "room", room.Name)
				// Clean up the stream connection
				if ok := sp.incomingConns.Has(room.Name); ok {
					sp.incomingConns.Delete(room.Name)
				}
			})
			if err != nil {
				slog.Error("Failed to create PeerConnection for pushed stream", "room", room.Name, "err", err)
				continue
			}

			pc.OnDataChannel(func(dc *webrtc.DataChannel) {
				// TODO: Is this the best way to handle the DataChannel? Should we just use the map directly?
				room.DataChannel = connections.NewNestriDataChannel(dc)
				room.DataChannel.RegisterOnOpen(func() {
					slog.Debug("DataChannel opened for pushed stream", "room", room.Name)
				})
				room.DataChannel.RegisterOnClose(func() {
					slog.Debug("DataChannel closed for pushed stream", "room", room.Name)
				})

				// Set the DataChannel in the incomingConns map
				if conn, ok := sp.incomingConns.Get(room.Name); ok {
					conn.ndc = room.DataChannel
				} else {
					sp.incomingConns.Set(room.Name, &StreamConnection{
						pc:  pc,
						ndc: room.DataChannel,
					})
				}
			})

			pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
				if candidate == nil {
					return
				}

				if err = safeBRW.SendJSON(connections.NewMessageICE(
					"ice-candidate",
					candidate.ToJSON(),
				)); err != nil {
					slog.Error("Failed to send ICE candidate message for pushed stream", "room", room.Name, "err", err)
					return
				}
			})

			pc.OnTrack(func(remoteTrack *webrtc.TrackRemote, receiver *webrtc.RTPReceiver) {
				localTrack, err := webrtc.NewTrackLocalStaticRTP(remoteTrack.Codec().RTPCodecCapability, remoteTrack.Kind().String(), fmt.Sprintf("nestri-%s-%s", room.Name, remoteTrack.Kind().String()))
				if err != nil {
					slog.Error("Failed to create local track for pushed stream", "room", room.Name, "track_kind", remoteTrack.Kind().String(), "err", err)
					return
				}

				slog.Debug("Received track for pushed stream", "room", room.Name, "track_kind", remoteTrack.Kind().String())

				// Set track for Room
				room.SetTrack(remoteTrack.Kind(), localTrack)

				// Prepare the PlayoutDelayExtension once so we don't recreate it for each packet
				playoutExt := &rtp.PlayoutDelayExtension{
					MinDelay: 0,
					MaxDelay: 0,
				}
				playoutPayload, err := playoutExt.Marshal()
				if err != nil {
					slog.Error("Failed to marshal PlayoutDelayExtension for room", "room", room.Name, "err", err)
					return
				}

				for {
					rtpPacket, _, err := remoteTrack.ReadRTP()
					if err != nil {
						if !errors.Is(err, io.EOF) {
							slog.Error("Failed to read RTP from remote track for room", "room", room.Name, "err", err)
						}
						break
					}

					// Use the PlayoutDelayExtension for low latency, if set for this track kind
					if extID, ok := common.GetExtension(remoteTrack.Kind(), common.ExtensionPlayoutDelay); ok {
						if err := rtpPacket.SetExtension(extID, playoutPayload); err != nil {
							slog.Error("Failed to set PlayoutDelayExtension for room", "room", room.Name, "err", err)
							continue
						}
					}

					err = localTrack.WriteRTP(rtpPacket)
					if err != nil && !errors.Is(err, io.ErrClosedPipe) {
						slog.Error("Failed to write RTP to local track for room", "room", room.Name, "err", err)
						break
					}
				}

				slog.Debug("Track closed for room", "room", room.Name, "track_kind", remoteTrack.Kind().String())

				// Clean the track up from the room
				room.SetTrack(remoteTrack.Kind(), nil)
			})

			// Set the remote description
			if err = pc.SetRemoteDescription(offerMsg.SDP); err != nil {
				slog.Error("Failed to set remote description for pushed stream", "room", room.Name, "err", err)
				continue
			}
			slog.Debug("Set remote description for pushed stream", "room", room.Name)

			// Create an answer
			answer, err := pc.CreateAnswer(nil)
			if err != nil {
				slog.Error("Failed to create answer for pushed stream", "room", room.Name, "err", err)
				continue
			}
			if err = pc.SetLocalDescription(answer); err != nil {
				slog.Error("Failed to set local description for pushed stream", "room", room.Name, "err", err)
				continue
			}
			if err = safeBRW.SendJSON(connections.NewMessageSDP(
				"answer",
				answer,
			)); err != nil {
				slog.Error("Failed to send answer for pushed stream", "room", room.Name, "err", err)
			}

			// Store the connection
			sp.incomingConns.Set(room.Name, &StreamConnection{
				pc:  pc,
				ndc: room.DataChannel, // if it exists; if not, it will be set later
			})
			slog.Debug("Sent answer for pushed stream", "room", room.Name)
		}
	}
}

// --- Public Usable Methods ---

// RequestStream sends a request for a room's stream to another relay
func (sp *StreamProtocol) RequestStream(ctx context.Context, room *shared.Room, peerID peer.ID) error {
	stream, err := sp.relay.Host.NewStream(ctx, peerID, protocolStreamRequest)
	if err != nil {
		return fmt.Errorf("failed to create stream request: %w", err)
	}

	return sp.requestStream(stream, room)
}
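The usual trigger for `RequestStream` is the state handler in state.go: when a remote room appears in the mesh and local participants are waiting, the relay dials the room's owner — a minimal sketch mirroring that flow (`r` is the local `*Relay`, `info` a `shared.RoomInfo` learned over PubSub):

	if room := r.GetRoomByName(info.Name); room != nil && room.Participants.Len() > 0 {
		if err := r.StreamProtocol.RequestStream(context.Background(), room, info.OwnerID); err != nil {
			slog.Error("Stream request failed", "room", room.Name, "err", err)
		}
	}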
13  packages/relay/internal/core/protocols.go  Normal file
@@ -0,0 +1,13 @@
package core

// ProtocolRegistry is a type holding all protocols, splitting that bloat away from the Relay struct
type ProtocolRegistry struct {
	StreamProtocol *StreamProtocol
}

// NewProtocolRegistry initializes and returns a new protocol registry
func NewProtocolRegistry(relay *Relay) ProtocolRegistry {
	return ProtocolRegistry{
		StreamProtocol: NewStreamProtocol(relay),
	}
}
108  packages/relay/internal/core/room.go  Normal file
@@ -0,0 +1,108 @@
package core

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"relay/internal/shared"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/oklog/ulid/v2"
)

// --- Room Management ---

// GetRoomByID retrieves a local Room struct by its ULID
func (r *Relay) GetRoomByID(id ulid.ULID) *shared.Room {
	if room, ok := r.LocalRooms.Get(id); ok {
		return room
	}
	return nil
}

// GetRoomByName retrieves a local Room struct by its name
func (r *Relay) GetRoomByName(name string) *shared.Room {
	for _, room := range r.LocalRooms.Copy() {
		if room.Name == name {
			return room
		}
	}
	return nil
}

// CreateRoom creates a new local Room struct with the given name
func (r *Relay) CreateRoom(name string) *shared.Room {
	roomID := ulid.Make()
	room := shared.NewRoom(name, roomID, r.ID)
	r.LocalRooms.Set(room.ID, room)
	slog.Debug("Created new local room", "room", name, "id", room.ID)
	return room
}

// DeleteRoomIfEmpty checks if a local room struct is inactive and can be removed
func (r *Relay) DeleteRoomIfEmpty(room *shared.Room) {
	if room == nil {
		return
	}
	if room.Participants.Len() == 0 && r.LocalRooms.Has(room.ID) {
		slog.Debug("Deleting empty room without participants", "room", room.Name)
		r.LocalRooms.Delete(room.ID)
		if room.PeerConnection != nil { // the room may never have had a PeerConnection
			if err := room.PeerConnection.Close(); err != nil {
				slog.Error("Failed to close Room PeerConnection", "room", room.Name, "err", err)
			}
		}
	}
}

// GetRemoteRoomByName returns a room from the mesh by name
func (r *Relay) GetRemoteRoomByName(roomName string) *shared.RoomInfo {
	for _, room := range r.MeshRooms.Copy() {
		if room.Name == roomName && room.OwnerID != r.ID {
			// Make sure the connection is alive
			if r.Host.Network().Connectedness(room.OwnerID) == network.Connected {
				return &room
			}
			slog.Debug("Removing stale peer, owns a room without connection", "room", roomName, "peer", room.OwnerID)
			r.onPeerDisconnected(room.OwnerID)
		}
	}
	return nil
}

// --- State Publishing ---

// publishRoomStates publishes the state of all rooms currently owned by *this* relay
func (r *Relay) publishRoomStates(ctx context.Context) error {
	if r.pubTopicState == nil {
		slog.Warn("Cannot publish room states: topic is nil")
		return nil
	}

	var statesToPublish []shared.RoomInfo
	r.LocalRooms.Range(func(id ulid.ULID, room *shared.Room) bool {
		// Only publish state for rooms owned by this relay
		if room.OwnerID == r.ID {
			statesToPublish = append(statesToPublish, shared.RoomInfo{
				ID:      room.ID,
				Name:    room.Name,
				OwnerID: r.ID,
			})
		}
		return true // Continue iteration
	})

	if len(statesToPublish) == 0 {
		return nil
	}

	data, err := json.Marshal(statesToPublish)
	if err != nil {
		return fmt.Errorf("failed to marshal local room states: %w", err)
	}
	if pubErr := r.pubTopicState.Publish(ctx, data); pubErr != nil {
		slog.Error("Failed to publish room states message", "err", pubErr)
	}
	return nil
}
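A quick lifecycle sketch tying these helpers together (illustrative only; `r` is a `*Relay` and "demo" a hypothetical room name):

	room := r.GetRoomByName("demo")
	if room == nil {
		room = r.CreateRoom("demo") // assigns a fresh ULID, owned by this relay
	}
	// ... participants join, a stream gets pushed or requested ...
	r.DeleteRoomIfEmpty(room) // no-op while participants remain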
173  packages/relay/internal/core/state.go  Normal file
@@ -0,0 +1,173 @@
package core

import (
	"context"
	"encoding/json"
	"errors"
	"log/slog"
	"relay/internal/shared"
	"time"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

// --- PubSub Message Handlers ---

// handleRoomStateMessages processes incoming room state updates from peers.
func (r *Relay) handleRoomStateMessages(ctx context.Context, sub *pubsub.Subscription) {
	slog.Debug("Starting room state message handler...")
	for {
		select {
		case <-ctx.Done():
			slog.Info("Stopping room state message handler")
			return
		default:
			msg, err := sub.Next(ctx)
			if err != nil {
				if errors.Is(err, context.Canceled) || errors.Is(err, pubsub.ErrSubscriptionCancelled) || errors.Is(err, context.DeadlineExceeded) {
					slog.Info("Room state subscription ended", "err", err)
					return
				}
				slog.Error("Error receiving room state message", "err", err)
				time.Sleep(1 * time.Second)
				continue
			}
			if msg.GetFrom() == r.Host.ID() {
				continue
			}

			var states []shared.RoomInfo
			if err := json.Unmarshal(msg.Data, &states); err != nil {
				slog.Error("Failed to unmarshal room states", "from", msg.GetFrom(), "data_len", len(msg.Data), "err", err)
				continue
			}

			r.updateMeshRoomStates(msg.GetFrom(), states)
		}
	}
}

// handleRelayMetricsMessages processes incoming status updates from peers.
func (r *Relay) handleRelayMetricsMessages(ctx context.Context, sub *pubsub.Subscription) {
	slog.Debug("Starting relay metrics message handler...")
	for {
		select {
		case <-ctx.Done():
			slog.Info("Stopping relay metrics message handler")
			return
		default:
			msg, err := sub.Next(ctx)
			if err != nil {
				if errors.Is(err, context.Canceled) || errors.Is(err, pubsub.ErrSubscriptionCancelled) || errors.Is(err, context.DeadlineExceeded) {
					slog.Info("Relay metrics subscription ended", "err", err)
					return
				}
				slog.Error("Error receiving relay metrics message", "err", err)
				time.Sleep(1 * time.Second)
				continue
			}
			if msg.GetFrom() == r.Host.ID() {
				continue
			}

			var info RelayInfo
			if err := json.Unmarshal(msg.Data, &info); err != nil {
				slog.Error("Failed to unmarshal relay status", "from", msg.GetFrom(), "data_len", len(msg.Data), "err", err)
				continue
			}
			if info.ID != msg.GetFrom() {
				slog.Error("Peer ID mismatch in relay status", "expected", info.ID, "actual", msg.GetFrom())
				continue
			}
			r.onPeerStatus(info)
		}
	}
}

// --- State Check Functions ---

// hasConnectedPeer checks if a peer is in the map and has a valid connection
func (r *Relay) hasConnectedPeer(peerID peer.ID) bool {
	if _, ok := r.LocalMeshPeers.Get(peerID); !ok {
		return false
	}
	if r.Host.Network().Connectedness(peerID) != network.Connected {
		slog.Debug("Peer not connected", "peer", peerID)
		return false
	}
	return true
}

// --- State Change Functions ---

// onPeerStatus updates the status of a peer based on received metrics, adding local perspective
func (r *Relay) onPeerStatus(recvInfo RelayInfo) {
	r.LocalMeshPeers.Set(recvInfo.ID, &recvInfo)
}

// onPeerConnected is called when a new peer connects to the relay
func (r *Relay) onPeerConnected(peerID peer.ID) {
	// Add to the local peer map
	r.LocalMeshPeers.Set(peerID, &RelayInfo{
		ID: peerID,
	})

	slog.Info("Peer connected", "peer", peerID)

	// Trigger an immediate state exchange
	go func() {
		if err := r.publishRelayMetrics(context.Background()); err != nil {
			slog.Error("Failed to publish relay metrics on connect", "err", err)
		} else if err = r.publishRoomStates(context.Background()); err != nil {
			slog.Error("Failed to publish room states on connect", "err", err)
		}
	}()
}

// onPeerDisconnected marks a peer as disconnected in our status view and removes latency info
func (r *Relay) onPeerDisconnected(peerID peer.ID) {
	slog.Info("Mesh peer disconnected, deleting from local peer map", "peer", peerID)
	// Remove the peer from local mesh peers
	if r.LocalMeshPeers.Has(peerID) {
		r.LocalMeshPeers.Delete(peerID)
	}
	// Remove any rooms associated with this peer
	if r.MeshRooms.Has(peerID.String()) {
		r.MeshRooms.Delete(peerID.String())
	}
	// Remove any latencies associated with this peer
	if r.MeshLatencies.Has(peerID.String()) {
		r.MeshLatencies.Delete(peerID.String())
	}

	// TODO: If any rooms were routed through this peer, handle that case
}

// updateMeshRoomStates merges received room states into the MeshRooms map
// TODO: Wrap in another type with a timestamp or another mechanism to avoid conflicts
func (r *Relay) updateMeshRoomStates(peerID peer.ID, states []shared.RoomInfo) {
	for _, state := range states {
		if state.OwnerID == r.ID {
			continue
		}

		// If the room did not previously exist in the mesh view but does now,
		// request its stream when our local room has participants waiting
		existed := r.MeshRooms.Has(state.ID.String())
		if !existed {
			// Request a connection to this peer if we have participants in our local room
			if room, ok := r.LocalRooms.Get(state.ID); ok {
				if room.Participants.Len() > 0 {
					slog.Debug("Got new remote room state we locally have participants for, requesting stream", "room_name", room.Name, "peer", peerID)
					if err := r.StreamProtocol.RequestStream(context.Background(), room, peerID); err != nil {
						slog.Error("Failed to request stream for new remote room state", "room_name", room.Name, "peer", peerID, "err", err)
					}
				}
			}
		}

		r.MeshRooms.Set(state.ID.String(), state)
	}
}
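Putting the two handlers together: once a peer's first metrics message arrives, `LocalMeshPeers` holds its advertised addresses and room list, which is handy for debugging — a minimal sketch from inside this package, assuming SafeMap's `Range` as used above:

	r.LocalMeshPeers.Range(func(id peer.ID, info *RelayInfo) bool {
		slog.Info("Known mesh peer", "peer", id, "addrs", info.MeshAddrs)
		return true
	})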
@@ -1,187 +0,0 @@
package internal

import (
    "context"
    "encoding/json"
    "github.com/pion/webrtc/v4"
    "google.golang.org/protobuf/proto"
    "log/slog"
    "relay/internal/common"
    "relay/internal/connections"
    gen "relay/internal/proto"
)

func ParticipantHandler(participant *Participant, room *Room, relay *Relay) {
    onPCClose := func() {
        slog.Debug("Participant PeerConnection closed", "participant", participant.ID, "room", room.Name)
        room.removeParticipantByID(participant.ID)
    }

    var err error
    participant.PeerConnection, err = common.CreatePeerConnection(onPCClose)
    if err != nil {
        slog.Error("Failed to create participant PeerConnection", "participant", participant.ID, "room", room.Name, "err", err)
        return
    }

    // Data channel settings
    settingOrdered := true
    settingMaxRetransmits := uint16(0)
    dc, err := participant.PeerConnection.CreateDataChannel("data", &webrtc.DataChannelInit{
        Ordered:        &settingOrdered,
        MaxRetransmits: &settingMaxRetransmits,
    })
    if err != nil {
        slog.Error("Failed to create data channel for participant", "participant", participant.ID, "room", room.Name, "err", err)
        return
    }
    participant.DataChannel = connections.NewNestriDataChannel(dc)

    // Register channel opening handling
    participant.DataChannel.RegisterOnOpen(func() {
        slog.Debug("DataChannel opened for participant", "participant", participant.ID, "room", room.Name)
    })

    // Register channel closing handling
    participant.DataChannel.RegisterOnClose(func() {
        slog.Debug("DataChannel closed for participant", "participant", participant.ID, "room", room.Name)
    })

    // Register input message handling
    participant.DataChannel.RegisterMessageCallback("input", func(data []byte) {
        ForwardParticipantDataChannelMessage(participant, room, data)
    })

    participant.PeerConnection.OnICECandidate(func(candidate *webrtc.ICECandidate) {
        if candidate == nil {
            return
        }
        if err := participant.WebSocket.SendICECandidateMessageWS(candidate.ToJSON()); err != nil {
            slog.Error("Failed to send ICE candidate to participant", "participant", participant.ID, "room", room.Name, "err", err)
        }
    })

    iceHolder := make([]webrtc.ICECandidateInit, 0)

    // ICE callback
    participant.WebSocket.RegisterMessageCallback("ice", func(data []byte) {
        var iceMsg connections.MessageICECandidate
        if err = json.Unmarshal(data, &iceMsg); err != nil {
            slog.Error("Failed to decode ICE candidate message from participant", "participant", participant.ID, "room", room.Name, "err", err)
            return
        }
        if participant.PeerConnection.RemoteDescription() != nil {
            if err = participant.PeerConnection.AddICECandidate(iceMsg.Candidate); err != nil {
                slog.Error("Failed to add ICE candidate for participant", "participant", participant.ID, "room", room.Name, "err", err)
            }
            // Add held ICE candidates
            for _, heldCandidate := range iceHolder {
                if err = participant.PeerConnection.AddICECandidate(heldCandidate); err != nil {
                    slog.Error("Failed to add held ICE candidate for participant", "participant", participant.ID, "room", room.Name, "err", err)
                }
            }
            iceHolder = nil
        } else {
            iceHolder = append(iceHolder, iceMsg.Candidate)
        }
    })

    // SDP answer callback
    participant.WebSocket.RegisterMessageCallback("sdp", func(data []byte) {
        var sdpMsg connections.MessageSDP
        if err = json.Unmarshal(data, &sdpMsg); err != nil {
            slog.Error("Failed to decode SDP message from participant", "participant", participant.ID, "room", room.Name, "err", err)
            return
        }
        handleParticipantSDP(participant, sdpMsg)
    })

    // Log callback
    participant.WebSocket.RegisterMessageCallback("log", func(data []byte) {
        var logMsg connections.MessageLog
        if err = json.Unmarshal(data, &logMsg); err != nil {
            slog.Error("Failed to decode log message from participant", "participant", participant.ID, "room", room.Name, "err", err)
            return
        }
        // TODO: Handle log message sending to metrics server
    })

    // Metrics callback
    participant.WebSocket.RegisterMessageCallback("metrics", func(data []byte) {
        // Ignore for now
    })

    participant.WebSocket.RegisterOnClose(func() {
        slog.Debug("WebSocket closed for participant", "participant", participant.ID, "room", room.Name)
        // Remove from Room
        room.removeParticipantByID(participant.ID)
    })

    slog.Info("Participant ready, sending OK answer", "participant", participant.ID, "room", room.Name)
    if err := participant.WebSocket.SendAnswerMessageWS(connections.AnswerOK); err != nil {
        slog.Error("Failed to send OK answer", "participant", participant.ID, "room", room.Name, "err", err)
    }

    // If room is online, also send offer
    if room.Online {
        if err = room.signalParticipantWithTracks(participant); err != nil {
            slog.Error("Failed to signal participant with tracks", "participant", participant.ID, "room", room.Name, "err", err)
        }
    } else {
        active, provider := relay.IsRoomActive(room.ID)
        if active {
            slog.Debug("Room active remotely, requesting stream", "room", room.Name, "provider", provider)
            if _, err := relay.requestStream(context.Background(), room.Name, room.ID, provider); err != nil {
                slog.Error("Failed to request stream", "room", room.Name, "err", err)
            } else {
                slog.Debug("Stream requested successfully", "room", room.Name, "provider", provider)
            }
        }
    }
}

// SDP answer handler for participants
func handleParticipantSDP(participant *Participant, answerMsg connections.MessageSDP) {
    // Get SDP answer
    sdpAnswer := answerMsg.SDP.SDP

    // Set remote description
    err := participant.PeerConnection.SetRemoteDescription(webrtc.SessionDescription{
        Type: webrtc.SDPTypeAnswer,
        SDP:  sdpAnswer,
    })
    if err != nil {
        slog.Error("Failed to set remote SDP answer for participant", "participant", participant.ID, "err", err)
    }
}

func ForwardParticipantDataChannelMessage(participant *Participant, room *Room, data []byte) {
    // Debug mode: Add latency timestamp
    if common.GetFlags().Debug {
        var inputMsg gen.ProtoMessageInput
        if err := proto.Unmarshal(data, &inputMsg); err != nil {
            slog.Error("Failed to decode input message from participant", "participant", participant.ID, "room", room.Name, "err", err)
            return
        }
        if protoLat := inputMsg.GetMessageBase().GetLatency(); protoLat != nil {
            lat := common.LatencyTrackerFromProto(protoLat)
            lat.AddTimestamp("relay_to_node")
            // Write the updated tracker back into the message so the new timestamp is
            // actually re-marshaled (field names follow the generated getters above)
            inputMsg.MessageBase.Latency = lat.ToProto()
        }
        if newData, err := proto.Marshal(&inputMsg); err != nil {
            slog.Error("Failed to marshal input message from participant", "participant", participant.ID, "room", room.Name, "err", err)
            return
        } else {
            // Update data with the modified message
            data = newData
        }
    }

    // Forward to local room DataChannel if it exists (e.g., local ingest)
    if room.DataChannel != nil {
        if err := room.DataChannel.SendBinary(data); err != nil {
            slog.Error("Failed to send input message to room", "participant", participant.ID, "room", room.Name, "err", err)
        }
    }
}
@@ -1,202 +0,0 @@
package internal

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "github.com/gorilla/websocket"
    "github.com/libp2p/go-reuseport"
    "log/slog"
    "net/http"
    "relay/internal/common"
    "relay/internal/connections"
    "strconv"
)

var httpMux *http.ServeMux

func InitHTTPEndpoint(_ context.Context, ctxCancel context.CancelFunc) error {
    // Create HTTP mux which serves our WS endpoint
    httpMux = http.NewServeMux()

    // Endpoints themselves
    httpMux.Handle("/", http.NotFoundHandler())
    // If control endpoint secret is set, enable the control endpoint
    if len(common.GetFlags().ControlSecret) > 0 {
        httpMux.HandleFunc("/api/control", corsAnyHandler(controlHandler))
    }
    // WS endpoint
    httpMux.HandleFunc("/api/ws/{roomName}", corsAnyHandler(wsHandler))

    // Get our serving port
    port := common.GetFlags().EndpointPort
    tlsCert := common.GetFlags().TLSCert
    tlsKey := common.GetFlags().TLSKey

    // Create re-usable listener port
    httpListener, err := reuseport.Listen("tcp", ":"+strconv.Itoa(port))
    if err != nil {
        return fmt.Errorf("failed to create TCP listener: %w", err)
    }

    // Log and start the endpoint server
    if len(tlsCert) <= 0 && len(tlsKey) <= 0 {
        slog.Info("Starting HTTP endpoint server", "port", port)
        go func() {
            if err := http.Serve(httpListener, httpMux); err != nil {
                slog.Error("Failed to start HTTP server", "err", err)
                ctxCancel()
            }
        }()
    } else if len(tlsCert) > 0 && len(tlsKey) > 0 {
        slog.Info("Starting HTTPS endpoint server", "port", port)
        go func() {
            if err := http.ServeTLS(httpListener, httpMux, tlsCert, tlsKey); err != nil {
                slog.Error("Failed to start HTTPS server", "err", err)
                ctxCancel()
            }
        }()
    } else {
        return errors.New("TLS certificate and key must both be provided together, or neither")
    }
    return nil
}

// logHTTPError logs (if verbose) and sends an error code to requester
func logHTTPError(w http.ResponseWriter, err string, code int) {
    if common.GetFlags().Verbose {
        slog.Error("HTTP error", "code", code, "message", err)
    }
    http.Error(w, err, code)
}

// corsAnyHandler allows any origin to access the endpoint
func corsAnyHandler(next func(w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
    return func(res http.ResponseWriter, req *http.Request) {
        // Allow all origins
        res.Header().Set("Access-Control-Allow-Origin", "*")
        res.Header().Set("Access-Control-Allow-Methods", "*")
        res.Header().Set("Access-Control-Allow-Headers", "*")

        if req.Method != http.MethodOptions {
            next(res, req)
        }
    }
}

// wsHandler is the handler for the /api/ws/{roomName} endpoint
func wsHandler(w http.ResponseWriter, r *http.Request) {
    // Get given room name now
    roomName := r.PathValue("roomName")
    if len(roomName) <= 0 {
        logHTTPError(w, "no room name given", http.StatusBadRequest)
        return
    }

    rel := GetRelay()
    // Get or create room in any case
    room := rel.GetOrCreateRoom(roomName)

    // Upgrade to WebSocket
    upgrader := websocket.Upgrader{
        CheckOrigin: func(r *http.Request) bool {
            return true
        },
    }
    wsConn, err := upgrader.Upgrade(w, r, nil)
    if err != nil {
        logHTTPError(w, err.Error(), http.StatusInternalServerError)
        return
    }

    // Create SafeWebSocket
    ws := connections.NewSafeWebSocket(wsConn)
    // Assign message handler for join request
    ws.RegisterMessageCallback("join", func(data []byte) {
        var joinMsg connections.MessageJoin
        if err = json.Unmarshal(data, &joinMsg); err != nil {
            slog.Error("Failed to unmarshal join message", "err", err)
            return
        }

        slog.Debug("Join message", "room", room.Name, "joinerType", joinMsg.JoinerType)

        // Handle join request, depending on whether it's from ingest/node or participant/client
        switch joinMsg.JoinerType {
        case connections.JoinerNode:
            // If room already online, send InUse answer
            if room.Online {
                if err = ws.SendAnswerMessageWS(connections.AnswerInUse); err != nil {
                    slog.Error("Failed to send InUse answer to node", "room", room.Name, "err", err)
                }
                return
            }
            room.AssignWebSocket(ws)
            go IngestHandler(room)
        case connections.JoinerClient:
            // Create participant and add to room regardless of online status
            participant := NewParticipant(ws)
            room.AddParticipant(participant)
            // If room not online, send Offline answer
            if !room.Online {
                if err = ws.SendAnswerMessageWS(connections.AnswerOffline); err != nil {
                    slog.Error("Failed to send offline answer to participant", "room", room.Name, "err", err)
                }
            }
            go ParticipantHandler(participant, room, rel)
        default:
            slog.Error("Unknown joiner type", "joinerType", joinMsg.JoinerType)
        }

        // Unregister ourselves; if something happens on the other side, they should just reconnect
        ws.UnregisterMessageCallback("join")
    })
}

// controlMessage is the JSON struct for the control messages
type controlMessage struct {
    Type  string `json:"type"`
    Value string `json:"value"`
}

// controlHandler is the handler for the /api/control endpoint, for controlling this relay
func controlHandler(w http.ResponseWriter, r *http.Request) {
    // Check for control secret in Authorization header
    authHeader := r.Header.Get("Authorization")
    if len(authHeader) <= 0 || authHeader != common.GetFlags().ControlSecret {
        logHTTPError(w, "missing or invalid Authorization header", http.StatusUnauthorized)
        return
    }

    // Handle CORS preflight request
    if r.Method == http.MethodOptions {
        w.WriteHeader(http.StatusOK)
        return
    }

    // Decode the control message
    var msg controlMessage
    if err := json.NewDecoder(r.Body).Decode(&msg); err != nil {
        logHTTPError(w, "failed to decode control message", http.StatusBadRequest)
        return
    }

    //relay := GetRelay()
    switch msg.Type {
    case "join_mesh":
        // Join the mesh network, get relay address from msg.Value
        if len(msg.Value) <= 0 {
            logHTTPError(w, "missing relay address", http.StatusBadRequest)
            return
        }
        ctx := r.Context()
        if err := GetRelay().ConnectToRelay(ctx, msg.Value); err != nil {
            http.Error(w, fmt.Sprintf("Failed to connect: %v", err), http.StatusInternalServerError)
            return
        }
        _, _ = w.Write([]byte("Successfully connected to relay"))
    default:
        logHTTPError(w, "unknown control message type", http.StatusBadRequest)
    }
}
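For reference, a control client for the endpoint above could look like the following sketch. The relay URL and secret are placeholders, while the `join_mesh` type, the verbatim `Authorization` header comparison, and the JSON shape come from the handler itself:

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    // Placeholders: relay URL, control secret, and target multiaddr.
    body := bytes.NewBufferString(`{"type":"join_mesh","value":"/ip4/203.0.113.7/tcp/4001/p2p/<peer-id>"}`)
    req, err := http.NewRequest(http.MethodPost, "https://relay.example.com/api/control", body)
    if err != nil {
        panic(err)
    }
    // The handler compares this header verbatim against its ControlSecret flag.
    req.Header.Set("Authorization", "my-control-secret")
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}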
@@ -1,217 +0,0 @@
package internal

import (
    "encoding/json"
    "errors"
    "fmt"
    "github.com/pion/rtp"
    "github.com/pion/webrtc/v4"
    "io"
    "log/slog"
    "relay/internal/common"
    "relay/internal/connections"
    "strings"
)

func IngestHandler(room *Room) {
    relay := GetRelay()

    // Callback for closing PeerConnection
    onPCClose := func() {
        slog.Debug("ingest PeerConnection closed", "room", room.Name)
        room.Online = false
        room.signalParticipantsOffline()
        relay.DeleteRoomIfEmpty(room)
    }

    var err error
    room.PeerConnection, err = common.CreatePeerConnection(onPCClose)
    if err != nil {
        slog.Error("Failed to create ingest PeerConnection", "room", room.Name, "err", err)
        return
    }

    room.PeerConnection.OnTrack(func(remoteTrack *webrtc.TrackRemote, receiver *webrtc.RTPReceiver) {
        localTrack, err := webrtc.NewTrackLocalStaticRTP(remoteTrack.Codec().RTPCodecCapability, remoteTrack.Kind().String(), fmt.Sprintf("nestri-%s-%s", room.Name, remoteTrack.Kind().String()))
        if err != nil {
            slog.Error("Failed to create local track for room", "room", room.Name, "kind", remoteTrack.Kind(), "err", err)
            return
        }
        slog.Debug("Received track for room", "room", room.Name, "kind", remoteTrack.Kind())

        // Set track and let Room handle state
        room.SetTrack(remoteTrack.Kind(), localTrack)

        // Prepare PlayoutDelayExtension so we don't need to recreate it for each packet
        playoutExt := &rtp.PlayoutDelayExtension{
            MinDelay: 0,
            MaxDelay: 0,
        }
        playoutPayload, err := playoutExt.Marshal()
        if err != nil {
            slog.Error("Failed to marshal PlayoutDelayExtension for room", "room", room.Name, "err", err)
            return
        }

        isVideo := remoteTrack.Kind() == webrtc.RTPCodecTypeVideo

        for {
            rtpPacket, _, err := remoteTrack.ReadRTP()
            if err != nil {
                if !errors.Is(err, io.EOF) {
                    slog.Error("Failed to read RTP from remote track for room", "room", room.Name, "err", err)
                }
                break
            }

            // Use PlayoutDelayExtension for low latency, only for video tracks
            if isVideo {
                if err := rtpPacket.SetExtension(common.ExtensionMap[common.ExtensionPlayoutDelay], playoutPayload); err != nil {
                    slog.Error("Failed to set PlayoutDelayExtension for room", "room", room.Name, "err", err)
                    continue
                }
            }

            err = localTrack.WriteRTP(rtpPacket)
            if err != nil && !errors.Is(err, io.ErrClosedPipe) {
                slog.Error("Failed to write RTP to local track for room", "room", room.Name, "err", err)
                break
            }
        }

        slog.Debug("Track closed for room", "room", room.Name, "kind", remoteTrack.Kind())

        // Clear track when done
        room.SetTrack(remoteTrack.Kind(), nil)
    })

    room.PeerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
        room.DataChannel = connections.NewNestriDataChannel(dc)
        slog.Debug("Ingest received DataChannel for room", "room", room.Name)

        room.DataChannel.RegisterOnOpen(func() {
            slog.Debug("ingest DataChannel opened for room", "room", room.Name)
        })

        room.DataChannel.RegisterOnClose(func() {
            slog.Debug("ingest DataChannel closed for room", "room", room.Name)
        })

        // We do not handle any messages from ingest via DataChannel yet
    })

    room.PeerConnection.OnICECandidate(func(candidate *webrtc.ICECandidate) {
        if candidate == nil {
            return
        }
        slog.Debug("ingest received ICECandidate for room", "room", room.Name)
        err = room.WebSocket.SendICECandidateMessageWS(candidate.ToJSON())
        if err != nil {
            slog.Error("Failed to send ICE candidate message to ingest for room", "room", room.Name, "err", err)
        }
    })

    iceHolder := make([]webrtc.ICECandidateInit, 0)

    // ICE callback
    room.WebSocket.RegisterMessageCallback("ice", func(data []byte) {
        var iceMsg connections.MessageICECandidate
        if err = json.Unmarshal(data, &iceMsg); err != nil {
            slog.Error("Failed to decode ICE candidate message from ingest for room", "room", room.Name, "err", err)
            return
        }
        if room.PeerConnection != nil {
            if room.PeerConnection.RemoteDescription() != nil {
                if err = room.PeerConnection.AddICECandidate(iceMsg.Candidate); err != nil {
                    slog.Error("Failed to add ICE candidate for room", "room", room.Name, "err", err)
                }
                for _, heldCandidate := range iceHolder {
                    if err = room.PeerConnection.AddICECandidate(heldCandidate); err != nil {
                        slog.Error("Failed to add held ICE candidate for room", "room", room.Name, "err", err)
                    }
                }
                iceHolder = make([]webrtc.ICECandidateInit, 0)
            } else {
                iceHolder = append(iceHolder, iceMsg.Candidate)
            }
        } else {
            slog.Error("ICE candidate received but PeerConnection is nil for room", "room", room.Name)
        }
    })

    // SDP offer callback
    room.WebSocket.RegisterMessageCallback("sdp", func(data []byte) {
        var sdpMsg connections.MessageSDP
        if err = json.Unmarshal(data, &sdpMsg); err != nil {
            slog.Error("Failed to decode SDP message from ingest for room", "room", room.Name, "err", err)
            return
        }
        answer := handleIngestSDP(room, sdpMsg)
        if answer != nil {
            if err = room.WebSocket.SendSDPMessageWS(*answer); err != nil {
                slog.Error("Failed to send SDP answer message to ingest for room", "room", room.Name, "err", err)
            }
        } else {
            slog.Error("Failed to handle ingest SDP message for room", "room", room.Name)
        }
    })

    // Log callback
    room.WebSocket.RegisterMessageCallback("log", func(data []byte) {
        var logMsg connections.MessageLog
        if err = json.Unmarshal(data, &logMsg); err != nil {
            slog.Error("Failed to decode log message from ingest for room", "room", room.Name, "err", err)
            return
        }
        // TODO: Handle log message sending to metrics server
    })

    // Metrics callback
    room.WebSocket.RegisterMessageCallback("metrics", func(data []byte) {
        var metricsMsg connections.MessageMetrics
        if err = json.Unmarshal(data, &metricsMsg); err != nil {
            slog.Error("Failed to decode metrics message from ingest for room", "room", room.Name, "err", err)
            return
        }
        // TODO: Handle metrics message sending to metrics server
    })

    room.WebSocket.RegisterOnClose(func() {
        slog.Debug("ingest WebSocket closed for room", "room", room.Name)
        room.Online = false
        room.signalParticipantsOffline()
        relay.DeleteRoomIfEmpty(room)
    })

    slog.Info("Room is ready, sending OK answer to ingest", "room", room.Name)
    if err = room.WebSocket.SendAnswerMessageWS(connections.AnswerOK); err != nil {
        slog.Error("Failed to send OK answer message to ingest for room", "room", room.Name, "err", err)
    }
}

// SDP offer handler, returns SDP answer
func handleIngestSDP(room *Room, offerMsg connections.MessageSDP) *webrtc.SessionDescription {
    var err error

    sdpOffer := offerMsg.SDP.SDP
    sdpOffer = strings.Replace(sdpOffer, ";sprop-maxcapturerate=24000", "", -1)

    err = room.PeerConnection.SetRemoteDescription(webrtc.SessionDescription{
        Type: webrtc.SDPTypeOffer,
        SDP:  sdpOffer,
    })
    if err != nil {
        slog.Error("Failed to set remote description for room", "room", room.Name, "err", err)
        return nil
    }

    answer, err := room.PeerConnection.CreateAnswer(nil)
    if err != nil {
        slog.Error("Failed to create SDP answer for room", "room", room.Name, "err", err)
        return nil
    }

    err = room.PeerConnection.SetLocalDescription(answer)
    if err != nil {
        slog.Error("Failed to set local description for room", "room", room.Name, "err", err)
        return nil
    }

    return &answer
}
@@ -1,77 +0,0 @@
package internal

import (
    "fmt"
    "github.com/oklog/ulid/v2"
    "github.com/pion/webrtc/v4"
    "log/slog"
    "math/rand"
    "relay/internal/common"
    "relay/internal/connections"
)

type Participant struct {
    ID             ulid.ULID //< Internal IDs keep tracking unique internally and avoid conflicts later
    Name           string
    WebSocket      *connections.SafeWebSocket
    PeerConnection *webrtc.PeerConnection
    DataChannel    *connections.NestriDataChannel
}

func NewParticipant(ws *connections.SafeWebSocket) *Participant {
    id, err := common.NewULID()
    if err != nil {
        slog.Error("Failed to create ULID for Participant", "err", err)
        return nil
    }
    return &Participant{
        ID:        id,
        Name:      createRandomName(),
        WebSocket: ws,
    }
}

func (p *Participant) addTrack(trackLocal *webrtc.TrackLocalStaticRTP) error {
    rtpSender, err := p.PeerConnection.AddTrack(trackLocal)
    if err != nil {
        return err
    }

    // Drain incoming RTCP so pion's interceptors keep processing; the packets themselves are discarded
    go func() {
        rtcpBuffer := make([]byte, 1400)
        for {
            if _, _, rtcpErr := rtpSender.Read(rtcpBuffer); rtcpErr != nil {
                break
            }
        }
    }()

    return nil
}

func (p *Participant) signalOffer() error {
    if p.PeerConnection == nil {
        return fmt.Errorf("peer connection is nil for participant: '%s' - cannot signal offer", p.ID)
    }

    offer, err := p.PeerConnection.CreateOffer(nil)
    if err != nil {
        return err
    }

    err = p.PeerConnection.SetLocalDescription(offer)
    if err != nil {
        return err
    }

    return p.WebSocket.SendSDPMessageWS(offer)
}

var namesFirst = []string{"Happy", "Sad", "Angry", "Calm", "Excited", "Bored", "Confused", "Confident", "Curious", "Depressed", "Disappointed", "Embarrassed", "Energetic", "Fearful", "Frustrated", "Glad", "Guilty", "Hopeful", "Impatient", "Jealous", "Lonely", "Motivated", "Nervous", "Optimistic", "Pessimistic", "Proud", "Relaxed", "Shy", "Stressed", "Surprised", "Tired", "Worried"}
var namesSecond = []string{"Dragon", "Unicorn", "Troll", "Goblin", "Elf", "Dwarf", "Ogre", "Gnome", "Mermaid", "Siren", "Vampire", "Ghoul", "Werewolf", "Minotaur", "Centaur", "Griffin", "Phoenix", "Wyvern", "Hydra", "Kraken"}

func createRandomName() string {
    randomFirst := namesFirst[rand.Intn(len(namesFirst))]
    randomSecond := namesSecond[rand.Intn(len(namesSecond))]
    return randomFirst + " " + randomSecond
}
@@ -1,702 +0,0 @@
package internal

import (
    "context"
    "crypto/sha256"
    "encoding/binary"
    "encoding/json"
    "errors"
    "fmt"
    "github.com/libp2p/go-libp2p"
    "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/host"
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/libp2p/go-libp2p/core/pnet"
    "github.com/libp2p/go-libp2p/p2p/security/noise"
    "github.com/multiformats/go-multiaddr"
    "github.com/oklog/ulid/v2"
    "github.com/pion/webrtc/v4"
    "io"
    "log/slog"
    "relay/internal/common"
    "relay/internal/connections"
)

var globalRelay *Relay

// networkNotifier logs connection events
type networkNotifier struct{}

func (n *networkNotifier) Connected(net network.Network, conn network.Conn) {
    slog.Info("Peer connected", "local", conn.LocalPeer(), "remote", conn.RemotePeer())
}
func (n *networkNotifier) Disconnected(net network.Network, conn network.Conn) {
    slog.Info("Peer disconnected", "local", conn.LocalPeer(), "remote", conn.RemotePeer())
}
func (n *networkNotifier) Listen(net network.Network, addr multiaddr.Multiaddr)      {}
func (n *networkNotifier) ListenClose(net network.Network, addr multiaddr.Multiaddr) {}

type ICEMessage struct {
    PeerID    string
    TargetID  string
    RoomID    ulid.ULID
    Candidate []byte
}

type Relay struct {
    ID                   peer.ID
    Rooms                *common.SafeMap[ulid.ULID, *Room]
    Host                 host.Host      // libp2p host for peer-to-peer networking
    PubSub               *pubsub.PubSub // PubSub for state synchronization
    MeshState            *common.SafeMap[ulid.ULID, RoomInfo]               // room ID -> state
    RelayPCs             *common.SafeMap[ulid.ULID, *webrtc.PeerConnection] // room ID -> relay PeerConnection
    pubTopicState        *pubsub.Topic // topic for room states
    pubTopicICECandidate *pubsub.Topic // topic for ICE candidates aimed to this relay
}

func NewRelay(ctx context.Context, port int) (*Relay, error) {
    listenAddrs := []string{
        fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port), // IPv4
        fmt.Sprintf("/ip6/::/tcp/%d", port),      // IPv6
    }

    // Use "testToken" as the pre-shared token for authentication
    // TODO: Give via flags, before PR commit
    token := "testToken"
    // Generate 32-byte PSK from the token using SHA-256
    shaToken := sha256.Sum256([]byte(token))
    tokenPSK := pnet.PSK(shaToken[:])

    // Initialize libp2p host
    p2pHost, err := libp2p.New(
        libp2p.ListenAddrStrings(listenAddrs...),
        libp2p.Security(noise.ID, noise.New),
        libp2p.EnableRelay(),
        libp2p.EnableHolePunching(),
        libp2p.PrivateNetwork(tokenPSK),
    )
    if err != nil {
        return nil, fmt.Errorf("failed to create libp2p host for relay: %w", err)
    }

    // Set up pubsub
    p2pPubsub, err := pubsub.NewGossipSub(ctx, p2pHost)
    if err != nil {
        return nil, fmt.Errorf("failed to create pubsub: %w", err)
    }

    // Add network notifier to log connections
    p2pHost.Network().Notify(&networkNotifier{})

    r := &Relay{
        ID:        p2pHost.ID(),
        Host:      p2pHost,
        PubSub:    p2pPubsub,
        Rooms:     common.NewSafeMap[ulid.ULID, *Room](),
        MeshState: common.NewSafeMap[ulid.ULID, RoomInfo](),
        RelayPCs:  common.NewSafeMap[ulid.ULID, *webrtc.PeerConnection](),
    }

    // Set up state synchronization and stream handling
    r.setupStateSync(ctx)
    r.setupStreamHandler()

    slog.Info("Relay initialized", "id", r.ID, "addrs", p2pHost.Addrs())

    peerInfo := peer.AddrInfo{
        ID:    p2pHost.ID(),
        Addrs: p2pHost.Addrs(),
    }
    addrs, err := peer.AddrInfoToP2pAddrs(&peerInfo)
    if err != nil {
        return nil, fmt.Errorf("failed to convert peer info to addresses: %w", err)
    }

    slog.Debug("Connect with one of the following addresses:")
    for _, addr := range addrs {
        slog.Debug(fmt.Sprintf("- %s", addr.String()))
    }
    return r, nil
}

func InitRelay(ctx context.Context, ctxCancel context.CancelFunc, port int) error {
    var err error
    globalRelay, err = NewRelay(ctx, port)
    if err != nil {
        return fmt.Errorf("failed to create relay: %w", err)
    }

    if err := common.InitWebRTCAPI(); err != nil {
        return err
    }

    if err := InitHTTPEndpoint(ctx, ctxCancel); err != nil {
        return err
    }

    slog.Info("Relay initialized", "id", globalRelay.ID)
    return nil
}

func GetRelay() *Relay {
    return globalRelay
}

func (r *Relay) GetRoomByID(id ulid.ULID) *Room {
    if room, ok := r.Rooms.Get(id); ok {
        return room
    }
    return nil
}

func (r *Relay) GetOrCreateRoom(name string) *Room {
    if room := r.GetRoomByName(name); room != nil {
        return room
    }

    id, err := common.NewULID()
    if err != nil {
        slog.Error("Failed to generate new ULID for room", "err", err)
        return nil
    }

    room := NewRoom(name, id, r.ID)
    room.Relay = r
    r.Rooms.Set(room.ID, room)

    slog.Debug("Created new room", "name", name, "id", room.ID)
    return room
}

func (r *Relay) DeleteRoomIfEmpty(room *Room) {
    participantCount := room.Participants.Len()
    if participantCount > 0 {
        slog.Debug("Room not empty, not deleting", "name", room.Name, "id", room.ID, "participants", participantCount)
        return
    }

    // Create a "tombstone" state for the room, this allows propagation of the room deletion
    tombstoneState := RoomInfo{
        ID:      room.ID,
        Name:    room.Name,
        Online:  false,
        OwnerID: room.OwnerID,
    }

    // Publish updated state to mesh
    if err := r.publishRoomState(context.Background(), tombstoneState); err != nil {
        slog.Error("Failed to publish room states on change", "room", room.Name, "err", err)
    }

    slog.Info("Deleting room since empty and offline", "name", room.Name, "id", room.ID)
    r.Rooms.Delete(room.ID)
}

func (r *Relay) setupStateSync(ctx context.Context) {
    var err error
    r.pubTopicState, err = r.PubSub.Join("room-states")
    if err != nil {
        slog.Error("Failed to join pubsub topic", "err", err)
        return
    }

    sub, err := r.pubTopicState.Subscribe()
    if err != nil {
        slog.Error("Failed to subscribe to topic", "err", err)
        return
    }

    r.pubTopicICECandidate, err = r.PubSub.Join("ice-candidates")
    if err != nil {
        slog.Error("Failed to join ICE candidates topic", "err", err)
        return
    }

    iceCandidateSub, err := r.pubTopicICECandidate.Subscribe()
    if err != nil {
        slog.Error("Failed to subscribe to ICE candidates topic", "err", err)
        return
    }

    // Handle state updates only from authenticated peers
    go func() {
        for {
            msg, err := sub.Next(ctx)
            if err != nil {
                slog.Error("Error receiving pubsub message", "err", err)
                return
            }
            if msg.GetFrom() == r.Host.ID() {
                continue // Ignore own messages
            }
            var states []RoomInfo
            if err := json.Unmarshal(msg.Data, &states); err != nil {
                slog.Error("Failed to unmarshal room states", "err", err)
                continue
            }
            r.updateMeshState(states)
        }
    }()

    // Handle incoming ICE candidates for given room
    go func() {
        // Map of ICE candidate slices per room ID
        iceHolder := make(map[ulid.ULID][]webrtc.ICECandidateInit)

        for {
            msg, err := iceCandidateSub.Next(ctx)
            if err != nil {
                slog.Error("Error receiving ICE candidate message", "err", err)
                return
            }
            if msg.GetFrom() == r.Host.ID() {
                continue // Ignore own messages
            }

            var iceMsg ICEMessage
            if err := json.Unmarshal(msg.Data, &iceMsg); err != nil {
                slog.Error("Failed to unmarshal ICE candidate message", "err", err)
                continue
            }
            if iceMsg.TargetID != r.ID.String() {
                continue // Ignore messages not meant for this relay
            }

            if iceHolder[iceMsg.RoomID] == nil {
                iceHolder[iceMsg.RoomID] = make([]webrtc.ICECandidateInit, 0)
            }

            if pc, ok := r.RelayPCs.Get(iceMsg.RoomID); ok {
                // Unmarshal ICE candidate
                var candidate webrtc.ICECandidateInit
                if err := json.Unmarshal(iceMsg.Candidate, &candidate); err != nil {
                    slog.Error("Failed to unmarshal ICE candidate", "err", err)
                    continue
                }
                if pc.RemoteDescription() != nil {
                    if err := pc.AddICECandidate(candidate); err != nil {
                        slog.Error("Failed to add ICE candidate", "err", err)
                    }
                    // Add any held candidates
                    for _, heldCandidate := range iceHolder[iceMsg.RoomID] {
                        if err := pc.AddICECandidate(heldCandidate); err != nil {
                            slog.Error("Failed to add held ICE candidate", "err", err)
                        }
                    }
                    iceHolder[iceMsg.RoomID] = make([]webrtc.ICECandidateInit, 0)
                } else {
                    iceHolder[iceMsg.RoomID] = append(iceHolder[iceMsg.RoomID], candidate)
                }
            } else {
                slog.Error("PeerConnection for room not found when adding ICE candidate", "roomID", iceMsg.RoomID)
            }
        }
    }()
}

func (r *Relay) publishRoomState(ctx context.Context, state RoomInfo) error {
    data, err := json.Marshal([]RoomInfo{state})
    if err != nil {
        return err
    }
    return r.pubTopicState.Publish(ctx, data)
}

func (r *Relay) publishRoomStates(ctx context.Context) error {
    var states []RoomInfo
    for _, room := range r.Rooms.Copy() {
        states = append(states, RoomInfo{
            ID:      room.ID,
            Name:    room.Name,
            Online:  room.Online,
            OwnerID: r.ID,
        })
    }
    data, err := json.Marshal(states)
    if err != nil {
        return err
    }
    return r.pubTopicState.Publish(ctx, data)
}
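For orientation, a single room serialized by publishRoomState above lands on the room-states topic as a one-element JSON array; the field names follow RoomInfo's json tags, and the ID values below are placeholders (peer IDs encode as their base58 string form):

[{"id":"01JAR9WYNV0B6WDVQ4W6GZJZ9Q","name":"demo","online":true,"owner_id":"12D3KooW..."}]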
func (r *Relay) updateMeshState(states []RoomInfo) {
    for _, state := range states {
        state := state // capture per iteration; the goroutine below reads it
        if state.OwnerID == r.ID {
            continue // Skip own state
        }
        existing, exists := r.MeshState.Get(state.ID)
        r.MeshState.Set(state.ID, state)
        slog.Debug("Updated mesh state", "room", state.Name, "online", state.Online, "owner", state.OwnerID)

        // React to state changes
        if !exists || existing.Online != state.Online {
            room := r.GetRoomByName(state.Name)
            if state.Online {
                if room == nil || !room.Online {
                    slog.Info("Room became active remotely, requesting stream", "room", state.Name, "owner", state.OwnerID)
                    go func() {
                        if _, err := r.requestStream(context.Background(), state.Name, state.ID, state.OwnerID); err != nil {
                            slog.Error("Failed to request stream", "room", state.Name, "err", err)
                        } else {
                            slog.Info("Successfully requested stream", "room", state.Name, "owner", state.OwnerID)
                        }
                    }()
                }
            } else if room != nil && room.Online {
                slog.Info("Room became inactive remotely, stopping local stream", "room", state.Name)
                if pc, ok := r.RelayPCs.Get(state.ID); ok {
                    _ = pc.Close()
                    r.RelayPCs.Delete(state.ID)
                }
                room.Online = false
                room.signalParticipantsOffline()
            } else if room == nil && !exists {
                slog.Info("Received tombstone state for room", "name", state.Name, "id", state.ID)
                if pc, ok := r.RelayPCs.Get(state.ID); ok {
                    _ = pc.Close()
                    r.RelayPCs.Delete(state.ID)
                }
            }
        }
    }
}

func (r *Relay) IsRoomActive(roomID ulid.ULID) (bool, peer.ID) {
    if state, exists := r.MeshState.Get(roomID); exists && state.Online {
        return true, state.OwnerID
    }
    return false, ""
}

func (r *Relay) GetRoomByName(name string) *Room {
    for _, room := range r.Rooms.Copy() {
        if room.Name == name {
            return room
        }
    }
    return nil
}

func writeMessage(stream network.Stream, data []byte) error {
    length := uint32(len(data))
    if err := binary.Write(stream, binary.BigEndian, length); err != nil {
        return err
    }
    _, err := stream.Write(data)
    return err
}

func readMessage(stream network.Stream) ([]byte, error) {
    var length uint32
    if err := binary.Read(stream, binary.BigEndian, &length); err != nil {
        return nil, err
    }
    data := make([]byte, length)
    _, err := io.ReadFull(stream, data)
    return data, err
}
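The two helpers above implement a simple length-prefixed framing: a big-endian uint32 length followed by the payload. A minimal, self-contained sketch of the same round trip over an in-memory pipe (generic io interfaces stand in for network.Stream; the function names here are illustrative):

package main

import (
    "encoding/binary"
    "fmt"
    "io"
    "net"
)

func writeFrame(w io.Writer, data []byte) error {
    if err := binary.Write(w, binary.BigEndian, uint32(len(data))); err != nil {
        return err
    }
    _, err := w.Write(data)
    return err
}

func readFrame(r io.Reader) ([]byte, error) {
    var length uint32
    if err := binary.Read(r, binary.BigEndian, &length); err != nil {
        return nil, err
    }
    data := make([]byte, length)
    _, err := io.ReadFull(r, data)
    return data, err
}

func main() {
    a, b := net.Pipe()
    go func() {
        defer a.Close()
        _ = writeFrame(a, []byte("room-name")) // e.g. the first message of the stream protocol
    }()
    msg, err := readFrame(b)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(msg)) // prints: room-name
}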
func (r *Relay) setupStreamHandler() {
    r.Host.SetStreamHandler("/nestri-relay/stream/1.0.0", func(stream network.Stream) {
        defer func(stream network.Stream) {
            err := stream.Close()
            if err != nil {
                slog.Error("Failed to close stream", "err", err)
            }
        }(stream)
        remotePeer := stream.Conn().RemotePeer()

        roomNameData, err := readMessage(stream)
        if err != nil && !errors.Is(err, io.EOF) {
            slog.Error("Failed to read room name", "peer", remotePeer, "err", err)
            return
        }
        roomName := string(roomNameData)

        slog.Info("Stream request from peer", "peer", remotePeer, "room", roomName)

        room := r.GetRoomByName(roomName)
        if room == nil || !room.Online {
            slog.Error("Cannot provide stream for inactive room", "room", roomName)
            return
        }

        pc, err := common.CreatePeerConnection(func() {
            r.RelayPCs.Delete(room.ID)
        })
        if err != nil {
            slog.Error("Failed to create relay PeerConnection", "err", err)
            return
        }

        r.RelayPCs.Set(room.ID, pc)

        if room.AudioTrack != nil {
            _, err := pc.AddTrack(room.AudioTrack)
            if err != nil {
                slog.Error("Failed to add audio track", "err", err)
                return
            }
        }
        if room.VideoTrack != nil {
            _, err := pc.AddTrack(room.VideoTrack)
            if err != nil {
                slog.Error("Failed to add video track", "err", err)
                return
            }
        }

        settingOrdered := true
        settingMaxRetransmits := uint16(0)
        dc, err := pc.CreateDataChannel("relay-data", &webrtc.DataChannelInit{
            Ordered:        &settingOrdered,
            MaxRetransmits: &settingMaxRetransmits,
        })
        if err != nil {
            slog.Error("Failed to create relay DataChannel", "err", err)
            return
        }
        relayDC := connections.NewNestriDataChannel(dc)

        relayDC.RegisterOnOpen(func() {
            slog.Debug("Relay DataChannel opened", "room", roomName)
        })

        relayDC.RegisterOnClose(func() {
            slog.Debug("Relay DataChannel closed", "room", roomName)
        })

        relayDC.RegisterMessageCallback("input", func(data []byte) {
            if room.DataChannel != nil {
                // Forward message to the room's data channel
                if err := room.DataChannel.SendBinary(data); err != nil {
                    slog.Error("Failed to send DataChannel message", "room", roomName, "err", err)
                }
            }
        })

        offer, err := pc.CreateOffer(nil)
        if err != nil {
            slog.Error("Failed to create offer", "err", err)
            return
        }
        if err := pc.SetLocalDescription(offer); err != nil {
            slog.Error("Failed to set local description", "err", err)
            return
        }
        offerData, err := json.Marshal(offer)
        if err != nil {
            slog.Error("Failed to marshal offer", "err", err)
            return
        }
        if err := writeMessage(stream, offerData); err != nil {
            slog.Error("Failed to send offer", "peer", remotePeer, "err", err)
            return
        }

        // Handle our generated ICE candidates
        pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
            if candidate == nil {
                return
            }
            candidateData, err := json.Marshal(candidate.ToJSON())
            if err != nil {
                slog.Error("Failed to marshal ICE candidate", "err", err)
                return
            }
            iceMsg := ICEMessage{
                PeerID:    r.Host.ID().String(),
                TargetID:  remotePeer.String(),
                RoomID:    room.ID,
                Candidate: candidateData,
            }
            data, err := json.Marshal(iceMsg)
            if err != nil {
                slog.Error("Failed to marshal ICE message", "err", err)
                return
            }
            if err := r.pubTopicICECandidate.Publish(context.Background(), data); err != nil {
                slog.Error("Failed to publish ICE candidate message", "err", err)
            }
        })

        answerData, err := readMessage(stream)
        if err != nil && !errors.Is(err, io.EOF) {
            slog.Error("Failed to read answer", "peer", remotePeer, "err", err)
            return
        }
        var answer webrtc.SessionDescription
        if err := json.Unmarshal(answerData, &answer); err != nil {
            slog.Error("Failed to unmarshal answer", "err", err)
            return
        }
        if err := pc.SetRemoteDescription(answer); err != nil {
            slog.Error("Failed to set remote description", "err", err)
            return
        }
    })
}
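Read together, setupStreamHandler above and requestStream below form a four-step handshake, summarized here from the code:

// 1. The requesting relay opens a "/nestri-relay/stream/1.0.0" stream and sends the room name, length-prefixed.
// 2. The providing relay builds a PeerConnection with the room's tracks plus a "relay-data" DataChannel and replies with an SDP offer.
// 3. Both sides trickle ICE candidates over the "ice-candidates" pubsub topic, addressed via TargetID.
// 4. The requesting relay answers with its SDP on the same stream, after which media flows peer-to-peer.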
func (r *Relay) requestStream(ctx context.Context, roomName string, roomID ulid.ULID, providerPeer peer.ID) (*webrtc.PeerConnection, error) {
    stream, err := r.Host.NewStream(ctx, providerPeer, "/nestri-relay/stream/1.0.0")
    if err != nil {
        return nil, fmt.Errorf("failed to create stream: %w", err)
    }
    defer func(stream network.Stream) {
        err := stream.Close()
        if err != nil {
            slog.Error("Failed to close stream", "err", err)
        }
    }(stream)

    if err := writeMessage(stream, []byte(roomName)); err != nil {
        return nil, fmt.Errorf("failed to send room name: %w", err)
    }

    room := r.GetRoomByName(roomName)
    if room == nil {
        room = NewRoom(roomName, roomID, providerPeer)
        r.Rooms.Set(roomID, room)
    } else if room.ID != roomID {
        // Mismatch, prefer the one from the provider
        // TODO: When mesh is created, if there are mismatches, we should have relays negotiate common room IDs
        room.ID = roomID
        room.OwnerID = providerPeer
        r.Rooms.Set(roomID, room)
    }

    pc, err := common.CreatePeerConnection(func() {
        r.RelayPCs.Delete(roomID)
    })
    if err != nil {
        return nil, fmt.Errorf("failed to create PeerConnection: %w", err)
    }

    r.RelayPCs.Set(roomID, pc)

    offerData, err := readMessage(stream)
    if err != nil && !errors.Is(err, io.EOF) {
        return nil, fmt.Errorf("failed to read offer: %w", err)
    }
    var offer webrtc.SessionDescription
    if err := json.Unmarshal(offerData, &offer); err != nil {
        return nil, fmt.Errorf("failed to unmarshal offer: %w", err)
    }
    if err := pc.SetRemoteDescription(offer); err != nil {
        return nil, fmt.Errorf("failed to set remote description: %w", err)
    }

    pc.OnTrack(func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver) {
        localTrack, err := webrtc.NewTrackLocalStaticRTP(track.Codec().RTPCodecCapability, track.ID(), "relay-"+roomName+"-"+track.Kind().String())
        if err != nil {
            slog.Error("Failed to create local track for mesh relay room", "room", roomName, "err", err)
            return
        }
        slog.Debug("Received track for mesh relay room", "room", roomName, "kind", track.Kind())

        room.SetTrack(track.Kind(), localTrack)

        go func() {
            for {
                rtpPacket, _, err := track.ReadRTP()
                if err != nil {
                    if !errors.Is(err, io.EOF) {
                        slog.Error("Failed to read RTP packet from remote track for room", "room", roomName, "err", err)
                    }
                    break
                }

                err = localTrack.WriteRTP(rtpPacket)
                if err != nil && !errors.Is(err, io.ErrClosedPipe) {
                    slog.Error("Failed to write RTP to local track for room", "room", room.Name, "err", err)
                    break
                }
            }
        }()
    })

    // ICE candidate handling
    pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
        if candidate == nil {
            return
        }
        candidateData, err := json.Marshal(candidate.ToJSON())
        if err != nil {
            slog.Error("Failed to marshal ICE candidate", "err", err)
            return
        }
        iceMsg := ICEMessage{
            PeerID:    r.Host.ID().String(),
            TargetID:  providerPeer.String(),
            RoomID:    roomID,
            Candidate: candidateData,
        }
        data, err := json.Marshal(iceMsg)
        if err != nil {
            slog.Error("Failed to marshal ICE message", "err", err)
            return
        }
        if err := r.pubTopicICECandidate.Publish(ctx, data); err != nil {
            slog.Error("Failed to publish ICE candidate message", "err", err)
        }
    })

    pc.OnDataChannel(func(dc *webrtc.DataChannel) {
        relayDC := connections.NewNestriDataChannel(dc)
        slog.Debug("Received DataChannel from peer", "room", roomName)

        relayDC.RegisterOnOpen(func() {
            slog.Debug("Relay DataChannel opened", "room", roomName)
        })

        relayDC.RegisterOnClose(func() {
            slog.Debug("Relay DataChannel closed", "room", roomName)
        })

        // Override room DataChannel with the mesh-relay one to forward messages
        room.DataChannel = relayDC
    })

    answer, err := pc.CreateAnswer(nil)
    if err != nil {
        return nil, fmt.Errorf("failed to create answer: %w", err)
    }
    if err := pc.SetLocalDescription(answer); err != nil {
        return nil, fmt.Errorf("failed to set local description: %w", err)
    }
    answerData, err := json.Marshal(answer)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal answer: %w", err)
    }
    if err := writeMessage(stream, answerData); err != nil {
        return nil, fmt.Errorf("failed to send answer: %w", err)
    }

    return pc, nil
}
// ConnectToRelay manually connects to another relay by its multiaddress
func (r *Relay) ConnectToRelay(ctx context.Context, addr string) error {
    // Parse the multiaddress
    ma, err := multiaddr.NewMultiaddr(addr)
    if err != nil {
        slog.Error("Invalid multiaddress", "addr", addr, "err", err)
        return fmt.Errorf("invalid multiaddress: %w", err)
    }

    // Extract peer ID from multiaddress
    peerInfo, err := peer.AddrInfoFromP2pAddr(ma)
    if err != nil {
        slog.Error("Failed to extract peer info", "addr", addr, "err", err)
        return fmt.Errorf("failed to extract peer info: %w", err)
    }

    // Connect to the peer
    if err := r.Host.Connect(ctx, *peerInfo); err != nil {
        slog.Error("Failed to connect to peer", "peer", peerInfo.ID, "addr", addr, "err", err)
        return fmt.Errorf("failed to connect: %w", err)
    }

    // Publish challenge on join
    //go r.sendAuthChallenge(ctx)

    slog.Info("Successfully connected to peer", "peer", peerInfo.ID, "addr", addr)
    return nil
}
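A hypothetical call, for illustration: the address must be a full p2p multiaddr ending in the target relay's peer ID, matching the form NewRelay logs at startup (the host, port, and peer ID below are placeholders):

addr := "/ip4/203.0.113.7/tcp/4001/p2p/<peer-id>" // placeholder values
if err := GetRelay().ConnectToRelay(context.Background(), addr); err != nil {
    slog.Error("Failed to join mesh", "err", err)
}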
packages/relay/internal/shared/participant.go (new file)
@@ -0,0 +1,44 @@
package shared

import (
    "fmt"
    "relay/internal/common"
    "relay/internal/connections"

    "github.com/oklog/ulid/v2"
    "github.com/pion/webrtc/v4"
)

type Participant struct {
    ID             ulid.ULID
    PeerConnection *webrtc.PeerConnection
    DataChannel    *connections.NestriDataChannel
}

func NewParticipant() (*Participant, error) {
    id, err := common.NewULID()
    if err != nil {
        return nil, fmt.Errorf("failed to create ULID for Participant: %w", err)
    }
    return &Participant{
        ID: id,
    }, nil
}

func (p *Participant) addTrack(trackLocal *webrtc.TrackLocalStaticRTP) error {
    rtpSender, err := p.PeerConnection.AddTrack(trackLocal)
    if err != nil {
        return err
    }

    // Drain incoming RTCP so pion's interceptors keep processing; the packets themselves are discarded
    go func() {
        rtcpBuffer := make([]byte, 1400)
        for {
            if _, _, rtcpErr := rtpSender.Read(rtcpBuffer); rtcpErr != nil {
                break
            }
        }
    }()

    return nil
}
@@ -1,32 +1,28 @@
package internal
package shared

import (
    "context"
    "fmt"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/oklog/ulid/v2"
    "github.com/pion/webrtc/v4"
    "log/slog"
    "relay/internal/common"
    "relay/internal/connections"

    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/oklog/ulid/v2"
    "github.com/pion/webrtc/v4"
)

type RoomInfo struct {
    ID      ulid.ULID `json:"id"`
    Name    string    `json:"name"`
    Online  bool      `json:"online"`
    OwnerID peer.ID   `json:"owner_id"`
}

type Room struct {
    RoomInfo
    WebSocket      *connections.SafeWebSocket
    PeerConnection *webrtc.PeerConnection
    AudioTrack     *webrtc.TrackLocalStaticRTP
    VideoTrack     *webrtc.TrackLocalStaticRTP
    DataChannel    *connections.NestriDataChannel
    Participants   *common.SafeMap[ulid.ULID, *Participant]
    Relay          *Relay
}

func NewRoom(name string, roomID ulid.ULID, ownerID peer.ID) *Room {
@@ -34,21 +30,12 @@ func NewRoom(name string, roomID ulid.ULID, ownerID peer.ID) *Room {
        RoomInfo: RoomInfo{
            ID:      roomID,
            Name:    name,
            Online:  false,
            OwnerID: ownerID,
        },
        Participants: common.NewSafeMap[ulid.ULID, *Participant](),
    }
}

// AssignWebSocket assigns a WebSocket connection to a Room
func (r *Room) AssignWebSocket(ws *connections.SafeWebSocket) {
    if r.WebSocket != nil {
        slog.Warn("WebSocket already assigned to room", "room", r.Name)
    }
    r.WebSocket = ws
}

// AddParticipant adds a Participant to a Room
func (r *Room) AddParticipant(participant *Participant) {
    slog.Debug("Adding participant to room", "participant", participant.ID, "room", r.Name)
@@ -62,21 +49,8 @@ func (r *Room) removeParticipantByID(pID ulid.ULID) {
    }
}

// Removes a Participant from a Room by participant's name
func (r *Room) removeParticipantByName(pName string) {
    for id, participant := range r.Participants.Copy() {
        if participant.Name == pName {
            if err := r.signalParticipantOffline(participant); err != nil {
                slog.Error("Failed to signal participant offline", "participant", participant.ID, "room", r.Name, "err", err)
            }
            r.Participants.Delete(id)
            break
        }
    }
}

// Removes all participants from a Room
func (r *Room) removeAllParticipants() {
/*func (r *Room) removeAllParticipants() {
    for id, participant := range r.Participants.Copy() {
        if err := r.signalParticipantOffline(participant); err != nil {
            slog.Error("Failed to signal participant offline", "participant", participant.ID, "room", r.Name, "err", err)
@@ -84,24 +58,28 @@ func (r *Room) removeAllParticipants() {
        r.Participants.Delete(id)
        slog.Debug("Removed participant from room", "participant", id, "room", r.Name)
    }
}*/

// IsOnline checks if the room is online (has both audio and video tracks)
func (r *Room) IsOnline() bool {
    return r.AudioTrack != nil && r.VideoTrack != nil
}

func (r *Room) SetTrack(trackType webrtc.RTPCodecType, track *webrtc.TrackLocalStaticRTP) {
    //oldOnline := r.IsOnline()

    switch trackType {
    case webrtc.RTPCodecTypeAudio:
        r.AudioTrack = track
        slog.Debug("Audio track set", "room", r.Name, "track", track != nil)
    case webrtc.RTPCodecTypeVideo:
        r.VideoTrack = track
        slog.Debug("Video track set", "room", r.Name, "track", track != nil)
    default:
        slog.Warn("Unknown track type", "room", r.Name, "trackType", trackType)
    }

    newOnline := r.AudioTrack != nil && r.VideoTrack != nil
    if r.Online != newOnline {
        r.Online = newOnline
        if r.Online {
    /*newOnline := r.IsOnline()
    if oldOnline != newOnline {
        if newOnline {
            slog.Debug("Room online, participants will be signaled", "room", r.Name)
            r.signalParticipantsWithTracks()
        } else {
@@ -109,15 +87,16 @@ func (r *Room) SetTrack(trackType webrtc.RTPCodecType, track *webrtc.TrackLocalS
            r.signalParticipantsOffline()
        }

        // Publish updated state to mesh
        // TODO: Publish updated state to mesh
        go func() {
            if err := r.Relay.publishRoomStates(context.Background()); err != nil {
                slog.Error("Failed to publish room states on change", "room", r.Name, "err", err)
            }
        }()
    }
    }*/
}

/* TODO: libp2p'ify
func (r *Room) signalParticipantsWithTracks() {
    for _, participant := range r.Participants.Copy() {
        if err := r.signalParticipantWithTracks(participant); err != nil {
@@ -162,3 +141,4 @@ func (r *Room) signalParticipantOffline(participant *Participant) error {
    }
    return nil
}
*/
@@ -2,12 +2,11 @@ package main

import (
    "context"
    "log"
    "log/slog"
    "os"
    "os/signal"
    "relay/internal"
    "relay/internal/common"
    "relay/internal/core"
    "syscall"
)

@@ -33,7 +32,7 @@ func main() {
    slog.SetDefault(logger)

    // Start relay
    err := internal.InitRelay(mainCtx, mainStopper, common.GetFlags().MeshPort)
    err := core.InitRelay(mainCtx, mainStopper)
    if err != nil {
        slog.Error("Failed to initialize relay", "err", err)
        mainStopper()
@@ -42,5 +41,5 @@

    // Wait for exit signal
    <-mainCtx.Done()
    log.Println("Shutting down gracefully by signal...")
    slog.Info("Shutting down gracefully by signal...")
}
packages/server/Cargo.lock: 1763 lines changed (generated file; diff suppressed because it is too large)
@@ -19,14 +19,14 @@ webrtc = "0.13"
regex = "1.11"
rand = "0.9"
rustls = { version = "0.23", features = ["ring"] }
tokio-tungstenite = { version = "0.26", features = ["native-tls"] }
tracing = "0.1"
tracing-subscriber = "0.3"
chrono = "0.4"
futures-util = "0.3"
num-derive = "0.4"
num-traits = "0.2"
prost = "0.13"
prost-types = "0.13"
parking_lot = "0.12"
atomic_refcell = "0.1"
byteorder = "1.5"
libp2p = { version = "0.55", features = ["identify", "dns", "tcp", "noise", "ping", "tokio", "serde", "yamux", "macros"] }
libp2p-stream = "0.3.0-alpha"
@@ -4,14 +4,14 @@ mod gpu;
mod latency;
mod messages;
mod nestrisink;
mod p2p;
mod proto;
mod websocket;

use crate::args::encoding_args;
use crate::enc_helper::EncoderType;
use crate::gpu::GPUVendor;
use crate::nestrisink::NestriSignaller;
use crate::websocket::NestriWebSocket;
use crate::p2p::p2p::NestriP2P;
use futures_util::StreamExt;
use gst::prelude::*;
use gstrswebrtc::signaller::Signallable;
@@ -19,6 +19,8 @@ use gstrswebrtc::webrtcsink::BaseWebRTCSink;
use std::error::Error;
use std::str::FromStr;
use std::sync::Arc;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::filter::LevelFilter;

// Handles gathering GPU information and selecting the most suitable GPU
fn handle_gpus(args: &args::Args) -> Result<gpu::GPUInfo, Box<dyn Error>> {
@@ -165,32 +167,29 @@ fn handle_encoder_audio(args: &args::Args) -> String {
async fn main() -> Result<(), Box<dyn Error>> {
    // Parse command line arguments
    let mut args = args::Args::new();
    if args.app.verbose {
        // Make sure tracing has INFO level
        tracing_subscriber::fmt()
            .with_max_level(tracing::Level::INFO)
            .init();

    tracing_subscriber::fmt()
        .with_env_filter(
            EnvFilter::builder()
                .with_default_directive(LevelFilter::INFO.into())
                .from_env()?,
        )
        .init();

    if args.app.verbose {
        args.debug_print();
    } else {
        tracing_subscriber::fmt::init();
    }

    rustls::crypto::ring::default_provider()
        .install_default()
        .expect("Failed to install ring crypto provider");

    // Begin connection attempt to the relay WebSocket endpoint
    // replace any http/https with ws/wss
    let replaced_relay_url = args
        .app
        .relay_url
        .replace("http://", "ws://")
        .replace("https://", "wss://");
    let ws_url = format!("{}/api/ws/{}", replaced_relay_url, args.app.room,);
    // Get relay URL from arguments
    let relay_url = args.app.relay_url.trim();

    // Setup our websocket
    let nestri_ws = Arc::new(NestriWebSocket::new(ws_url).await?);
    // Initialize libp2p (logically the sink should handle the connection to be independent)
    let nestri_p2p = Arc::new(NestriP2P::new().await?);
    let p2p_conn = nestri_p2p.connect(relay_url).await?;
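With the WebSocket URL rewriting gone, `relay_url` is now expected to be a libp2p multiaddr ending in the relay's peer ID rather than an HTTP(S) endpoint. A minimal sketch of the expected shape (address and peer ID below are placeholders, not real deployment values):

```rust
// Hypothetical multiaddr; connect() needs the trailing /p2p/<peer-id>
// component to know which peer to open streams against.
let relay_url = "/ip4/127.0.0.1/tcp/4001/p2p/<relay-peer-id>";
```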
    gst::init()?;
    gstrswebrtc::plugin_register_static()?;
@@ -328,7 +327,8 @@ async fn main() -> Result<(), Box<dyn Error>> {

    /* Output */
    // WebRTC sink Element
    let signaller = NestriSignaller::new(nestri_ws.clone(), video_source.clone());
    let signaller =
        NestriSignaller::new(args.app.room, p2p_conn.clone(), video_source.clone()).await?;
    let webrtcsink = BaseWebRTCSink::with_signaller(Signallable::from(signaller.clone()));
    webrtcsink.set_property_from_str("stun-server", "stun://stun.l.google.com:19302");
    webrtcsink.set_property_from_str("congestion-control", "disabled");
@@ -1,8 +1,5 @@
use crate::latency::LatencyTracker;
use num_derive::{FromPrimitive, ToPrimitive};
use num_traits::{FromPrimitive, ToPrimitive};
use serde::{Deserialize, Serialize};
use std::error::Error;
use webrtc::ice_transport::ice_candidate::RTCIceCandidateInit;
use webrtc::peer_connection::sdp::session_description::RTCSessionDescription;

@@ -12,6 +9,13 @@ pub struct MessageBase {
    pub latency: Option<LatencyTracker>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MessageRaw {
    #[serde(flatten)]
    pub base: MessageBase,
    pub data: serde_json::Value,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MessageLog {
    #[serde(flatten)]
@@ -44,76 +48,3 @@ pub struct MessageSDP {
    pub base: MessageBase,
    pub sdp: RTCSessionDescription,
}

#[repr(i32)]
#[derive(Debug, FromPrimitive, ToPrimitive, Copy, Clone, Serialize, Deserialize)]
#[serde(try_from = "i32", into = "i32")]
pub enum JoinerType {
    JoinerNode = 0,
    JoinerClient = 1,
}
impl TryFrom<i32> for JoinerType {
    type Error = &'static str;

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        JoinerType::from_i32(value).ok_or("Invalid value for JoinerType")
    }
}
impl From<JoinerType> for i32 {
    fn from(joiner_type: JoinerType) -> Self {
        joiner_type.to_i32().unwrap()
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MessageJoin {
    #[serde(flatten)]
    pub base: MessageBase,
    pub joiner_type: JoinerType,
}

#[repr(i32)]
#[derive(Debug, FromPrimitive, ToPrimitive, Copy, Clone, Serialize, Deserialize)]
#[serde(try_from = "i32", into = "i32")]
pub enum AnswerType {
    AnswerOffline = 0,
    AnswerInUse = 1,
    AnswerOK = 2,
}
impl TryFrom<i32> for AnswerType {
    type Error = &'static str;

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        AnswerType::from_i32(value).ok_or("Invalid value for AnswerType")
    }
}
impl From<AnswerType> for i32 {
    fn from(answer_type: AnswerType) -> Self {
        answer_type.to_i32().unwrap()
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MessageAnswer {
    #[serde(flatten)]
    pub base: MessageBase,
    pub answer_type: AnswerType,
}

pub fn encode_message<T: Serialize>(message: &T) -> Result<String, Box<dyn Error>> {
    // Serialize the message to JSON
    let json = serde_json::to_string(message)?;
    Ok(json)
}

pub fn decode_message(data: String) -> Result<MessageBase, Box<dyn Error + Send + Sync>> {
    let base_message: MessageBase = serde_json::from_str(&data)?;
    Ok(base_message)
}

pub fn decode_message_as<T: for<'de> Deserialize<'de>>(
    data: String,
) -> Result<T, Box<dyn Error + Send + Sync>> {
    let message: T = serde_json::from_str(&data)?;
    Ok(message)
}
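Because `MessageRaw` flattens `MessageBase`, `payload_type` sits at the top level of the JSON next to `data`. A minimal round-trip sketch with the helpers above (the room name is an illustrative value):

```rust
let msg = MessageRaw {
    base: MessageBase {
        payload_type: "push-stream-room".to_string(),
        latency: None,
    },
    data: serde_json::Value::from("example-room"),
};
// Serializes to roughly {"payload_type":"push-stream-room","data":"example-room"}
// (plus the latency field, depending on serde's handling of None).
let json = encode_message(&msg)?;
let decoded: MessageRaw = decode_message_as(json)?;
assert_eq!(decoded.base.payload_type, "push-stream-room");
```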
@@ -1,12 +1,10 @@
use crate::messages::{
    AnswerType, JoinerType, MessageAnswer, MessageBase, MessageICE, MessageJoin, MessageSDP,
    decode_message_as, encode_message,
};
use crate::messages::{MessageBase, MessageICE, MessageRaw, MessageSDP};
use crate::p2p::p2p::NestriConnection;
use crate::p2p::p2p_protocol_stream::NestriStreamProtocol;
use crate::proto::proto::proto_input::InputType::{
    KeyDown, KeyUp, MouseKeyDown, MouseKeyUp, MouseMove, MouseMoveAbs, MouseWheel,
};
use crate::proto::proto::{ProtoInput, ProtoMessageInput};
use crate::websocket::NestriWebSocket;
use atomic_refcell::AtomicRefCell;
use glib::subclass::prelude::*;
use gst::glib;
@@ -20,22 +18,37 @@ use webrtc::ice_transport::ice_candidate::RTCIceCandidateInit;
use webrtc::peer_connection::sdp::session_description::RTCSessionDescription;

pub struct Signaller {
    nestri_ws: PLRwLock<Option<Arc<NestriWebSocket>>>,
    stream_room: PLRwLock<Option<String>>,
    stream_protocol: PLRwLock<Option<Arc<NestriStreamProtocol>>>,
    wayland_src: PLRwLock<Option<Arc<gst::Element>>>,
    data_channel: AtomicRefCell<Option<gst_webrtc::WebRTCDataChannel>>,
}
impl Default for Signaller {
    fn default() -> Self {
        Self {
            nestri_ws: PLRwLock::new(None),
            stream_room: PLRwLock::new(None),
            stream_protocol: PLRwLock::new(None),
            wayland_src: PLRwLock::new(None),
            data_channel: AtomicRefCell::new(None),
        }
    }
}
impl Signaller {
    pub fn set_nestri_ws(&self, nestri_ws: Arc<NestriWebSocket>) {
        *self.nestri_ws.write() = Some(nestri_ws);
    pub async fn set_nestri_connection(
        &self,
        nestri_conn: NestriConnection,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let stream_protocol = NestriStreamProtocol::new(nestri_conn).await?;
        *self.stream_protocol.write() = Some(Arc::new(stream_protocol));
        Ok(())
    }

    pub fn set_stream_room(&self, room: String) {
        *self.stream_room.write() = Some(room);
    }

    fn get_stream_protocol(&self) -> Option<Arc<NestriStreamProtocol>> {
        self.stream_protocol.read().clone()
    }

    pub fn set_wayland_src(&self, wayland_src: Arc<gst::Element>) {
@@ -58,16 +71,14 @@ impl Signaller {

    /// Helper method to clean things up
    fn register_callbacks(&self) {
        let nestri_ws = {
            self.nestri_ws
                .read()
                .clone()
                .expect("NestriWebSocket not set")
        let Some(stream_protocol) = self.get_stream_protocol() else {
            gst::error!(gst::CAT_DEFAULT, "Stream protocol not set");
            return;
        };
        {
            let self_obj = self.obj().clone();
            let _ = nestri_ws.register_callback("sdp", move |data| {
                if let Ok(message) = decode_message_as::<MessageSDP>(data) {
            stream_protocol.register_callback("answer", move |data| {
                if let Ok(message) = serde_json::from_slice::<MessageSDP>(&data) {
                    let sdp =
                        gst_sdp::SDPMessage::parse_buffer(message.sdp.sdp.as_bytes()).unwrap();
                    let answer = WebRTCSessionDescription::new(WebRTCSDPType::Answer, sdp);
@@ -82,12 +93,11 @@ impl Signaller {
        }
        {
            let self_obj = self.obj().clone();
            let _ = nestri_ws.register_callback("ice", move |data| {
                if let Ok(message) = decode_message_as::<MessageICE>(data) {
            stream_protocol.register_callback("ice-candidate", move |data| {
                if let Ok(message) = serde_json::from_slice::<MessageICE>(&data) {
                    let candidate = message.candidate;
                    let sdp_m_line_index = candidate.sdp_mline_index.unwrap_or(0) as u32;
                    let sdp_mid = candidate.sdp_mid;

                    self_obj.emit_by_name::<()>(
                        "handle-ice",
                        &[
@@ -104,29 +114,28 @@ impl Signaller {
        }
        {
            let self_obj = self.obj().clone();
            let _ = nestri_ws.register_callback("answer", move |data| {
                if let Ok(answer) = decode_message_as::<MessageAnswer>(data) {
                    gst::info!(gst::CAT_DEFAULT, "Received answer: {:?}", answer);
                    match answer.answer_type {
                        AnswerType::AnswerOK => {
                            gst::info!(gst::CAT_DEFAULT, "Received OK answer");
                            // Send our SDP offer
                            self_obj.emit_by_name::<()>(
                                "session-requested",
                                &[
                                    &"unique-session-id",
                                    &"consumer-identifier",
                                    &None::<WebRTCSessionDescription>,
                                ],
                            );
                        }
                        AnswerType::AnswerInUse => {
                            gst::error!(gst::CAT_DEFAULT, "Room is in use by another node");
                        }
                        AnswerType::AnswerOffline => {
                            gst::warning!(gst::CAT_DEFAULT, "Room is offline");
                        }
            stream_protocol.register_callback("push-stream-ok", move |data| {
                if let Ok(answer) = serde_json::from_slice::<MessageRaw>(&data) {
                    // Decode room name string
                    if let Some(room_name) = answer.data.as_str() {
                        gst::info!(
                            gst::CAT_DEFAULT,
                            "Received OK answer for room: {}",
                            room_name
                        );
                    } else {
                        gst::error!(gst::CAT_DEFAULT, "Failed to decode room name from answer");
                    }

                    // Send our SDP offer
                    self_obj.emit_by_name::<()>(
                        "session-requested",
                        &[
                            &"unique-session-id",
                            &"consumer-identifier",
                            &None::<WebRTCSessionDescription>,
                        ],
                    );
                } else {
                    gst::error!(gst::CAT_DEFAULT, "Failed to decode answer");
                }
@@ -177,89 +186,32 @@ impl SignallableImpl for Signaller {
    fn start(&self) {
        gst::info!(gst::CAT_DEFAULT, "Signaller started");

        // Get WebSocket connection
        let nestri_ws = {
            self.nestri_ws
                .read()
                .clone()
                .expect("NestriWebSocket not set")
        };

        // Register message callbacks
        self.register_callbacks();

        // Subscribe to reconnection notifications
        let reconnected_notify = nestri_ws.subscribe_reconnected();
        // TODO: Re-implement reconnection handling

        // Clone necessary references
        let self_clone = self.obj().clone();
        let nestri_ws_clone = nestri_ws.clone();
        let Some(stream_room) = self.stream_room.read().clone() else {
            gst::error!(gst::CAT_DEFAULT, "Stream room not set");
            return;
        };

        // Spawn a task to handle actions upon reconnection
        tokio::spawn(async move {
            loop {
                // Wait for a reconnection notification
                reconnected_notify.notified().await;

                tracing::warn!("Reconnected to relay, re-negotiating...");
                gst::warning!(gst::CAT_DEFAULT, "Reconnected to relay, re-negotiating...");

                // Emit "session-ended" first to make sure the element is cleaned up
                self_clone.emit_by_name::<bool>("session-ended", &[&"unique-session-id"]);

                // Send a new join message
                let join_msg = MessageJoin {
                    base: MessageBase {
                        payload_type: "join".to_string(),
                        latency: None,
                    },
                    joiner_type: JoinerType::JoinerNode,
                };
                if let Ok(encoded) = encode_message(&join_msg) {
                    if let Err(e) = nestri_ws_clone.send_message(encoded) {
                        gst::error!(
                            gst::CAT_DEFAULT,
                            "Failed to send join message after reconnection: {:?}",
                            e
                        );
                    }
                } else {
                    gst::error!(
                        gst::CAT_DEFAULT,
                        "Failed to encode join message after reconnection"
                    );
                }

                // If we need to interact with GStreamer or GLib, schedule it on the main thread
                let self_clone_for_main = self_clone.clone();
                glib::MainContext::default().invoke(move || {
                    // Emit the "session-requested" signal
                    self_clone_for_main.emit_by_name::<()>(
                        "session-requested",
                        &[
                            &"unique-session-id",
                            &"consumer-identifier",
                            &None::<WebRTCSessionDescription>,
                        ],
                    );
                });
            }
        });

        let join_msg = MessageJoin {
        let push_msg = MessageRaw {
            base: MessageBase {
                payload_type: "join".to_string(),
                payload_type: "push-stream-room".to_string(),
                latency: None,
            },
            joiner_type: JoinerType::JoinerNode,
            data: serde_json::Value::from(stream_room),
        };
        if let Ok(encoded) = encode_message(&join_msg) {
            if let Err(e) = nestri_ws.send_message(encoded) {
                tracing::error!("Failed to send join message: {:?}", e);
                gst::error!(gst::CAT_DEFAULT, "Failed to send join message: {:?}", e);
            }
        } else {
            gst::error!(gst::CAT_DEFAULT, "Failed to encode join message");

        let Some(stream_protocol) = self.get_stream_protocol() else {
            gst::error!(gst::CAT_DEFAULT, "Stream protocol not set");
            return;
        };

        if let Err(e) = stream_protocol.send_message(&push_msg) {
            tracing::error!("Failed to send push stream room message: {:?}", e);
        }
    }
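The join/answer handshake from the enum-based protocol is replaced here by a single raw request: the node announces its room with `push-stream-room` and the relay acknowledges on `push-stream-ok` (handled in `register_callbacks` above). Illustrative wire shapes, assuming default serde output:

```rust
// Node -> relay, built from MessageRaw above:
//   {"payload_type":"push-stream-room","latency":null,"data":"<room-name>"}
// Relay -> node, dispatched to the "push-stream-ok" callback:
//   {"payload_type":"push-stream-ok","latency":null,"data":"<room-name>"}
```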
@@ -268,26 +220,21 @@ impl SignallableImpl for Signaller {
    }

    fn send_sdp(&self, _session_id: &str, sdp: &WebRTCSessionDescription) {
        let nestri_ws = {
            self.nestri_ws
                .read()
                .clone()
                .expect("NestriWebSocket not set")
        };
        let sdp_message = MessageSDP {
            base: MessageBase {
                payload_type: "sdp".to_string(),
                payload_type: "offer".to_string(),
                latency: None,
            },
            sdp: RTCSessionDescription::offer(sdp.sdp().as_text().unwrap()).unwrap(),
        };
        if let Ok(encoded) = encode_message(&sdp_message) {
            if let Err(e) = nestri_ws.send_message(encoded) {
                tracing::error!("Failed to send SDP message: {:?}", e);
                gst::error!(gst::CAT_DEFAULT, "Failed to send SDP message: {:?}", e);
            }
        } else {
            gst::error!(gst::CAT_DEFAULT, "Failed to encode SDP message");

        let Some(stream_protocol) = self.get_stream_protocol() else {
            gst::error!(gst::CAT_DEFAULT, "Stream protocol not set");
            return;
        };

        if let Err(e) = stream_protocol.send_message(&sdp_message) {
            tracing::error!("Failed to send SDP message: {:?}", e);
        }
    }
@@ -298,12 +245,6 @@ impl SignallableImpl for Signaller {
        sdp_m_line_index: u32,
        sdp_mid: Option<String>,
    ) {
        let nestri_ws = {
            self.nestri_ws
                .read()
                .clone()
                .expect("NestriWebSocket not set")
        };
        let candidate_init = RTCIceCandidateInit {
            candidate: candidate.to_string(),
            sdp_mid,
@@ -312,18 +253,19 @@ impl SignallableImpl for Signaller {
        };
        let ice_message = MessageICE {
            base: MessageBase {
                payload_type: "ice".to_string(),
                payload_type: "ice-candidate".to_string(),
                latency: None,
            },
            candidate: candidate_init,
        };
        if let Ok(encoded) = encode_message(&ice_message) {
            if let Err(e) = nestri_ws.send_message(encoded) {
                tracing::error!("Failed to send ICE message: {:?}", e);
                gst::error!(gst::CAT_DEFAULT, "Failed to send ICE message: {:?}", e);
            }
        } else {
            gst::error!(gst::CAT_DEFAULT, "Failed to encode ICE message");

        let Some(stream_protocol) = self.get_stream_protocol() else {
            gst::error!(gst::CAT_DEFAULT, "Stream protocol not set");
            return;
        };

        if let Err(e) = stream_protocol.send_message(&ice_message) {
            tracing::error!("Failed to send ICE candidate message: {:?}", e);
        }
    }
@@ -1,4 +1,4 @@
use crate::websocket::NestriWebSocket;
use crate::p2p::p2p::NestriConnection;
use gst::glib;
use gst::subclass::prelude::*;
use gstrswebrtc::signaller::Signallable;
@@ -11,15 +11,20 @@ glib::wrapper! {
}

impl NestriSignaller {
    pub fn new(nestri_ws: Arc<NestriWebSocket>, wayland_src: Arc<gst::Element>) -> Self {
    pub async fn new(
        room: String,
        nestri_conn: NestriConnection,
        wayland_src: Arc<gst::Element>,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        let obj: Self = glib::Object::new();
        obj.imp().set_nestri_ws(nestri_ws);
        obj.imp().set_stream_room(room);
        obj.imp().set_nestri_connection(nestri_conn).await?;
        obj.imp().set_wayland_src(wayland_src);
        obj
        Ok(obj)
    }
}
impl Default for NestriSignaller {
    fn default() -> Self {
        panic!("Cannot create NestriSignaller without NestriWebSocket");
        panic!("Cannot create NestriSignaller without NestriConnection and WaylandSrc");
    }
}
packages/server/src/p2p.rs: 3 lines (new file)
@@ -0,0 +1,3 @@
pub mod p2p;
pub mod p2p_safestream;
pub mod p2p_protocol_stream;
packages/server/src/p2p/p2p.rs: 131 lines (new file)
@@ -0,0 +1,131 @@
use futures_util::StreamExt;
use libp2p::multiaddr::Protocol;
use libp2p::{
    Multiaddr, PeerId, Swarm, identify, noise, ping,
    swarm::{NetworkBehaviour, SwarmEvent},
    tcp, yamux,
};
use std::error::Error;
use std::sync::Arc;
use tokio::sync::Mutex;

#[derive(Clone)]
pub struct NestriConnection {
    pub peer_id: PeerId,
    pub control: libp2p_stream::Control,
}

#[derive(NetworkBehaviour)]
struct NestriBehaviour {
    identify: identify::Behaviour,
    ping: ping::Behaviour,
    stream: libp2p_stream::Behaviour,
}

pub struct NestriP2P {
    swarm: Arc<Mutex<Swarm<NestriBehaviour>>>,
}
impl NestriP2P {
    pub async fn new() -> Result<Self, Box<dyn Error>> {
        let swarm = Arc::new(Mutex::new(
            libp2p::SwarmBuilder::with_new_identity()
                .with_tokio()
                .with_tcp(
                    tcp::Config::default(),
                    noise::Config::new,
                    yamux::Config::default,
                )?
                .with_dns()?
                .with_behaviour(|key| {
                    let identify_behaviour = identify::Behaviour::new(identify::Config::new(
                        "/ipfs/id/1.0.0".to_string(),
                        key.public(),
                    ));
                    let ping_behaviour = ping::Behaviour::default();
                    let stream_behaviour = libp2p_stream::Behaviour::default();

                    Ok(NestriBehaviour {
                        identify: identify_behaviour,
                        ping: ping_behaviour,
                        stream: stream_behaviour,
                    })
                })?
                .build(),
        ));

        // Spawn the swarm event loop
        let swarm_clone = swarm.clone();
        tokio::spawn(swarm_loop(swarm_clone));

        {
            let mut swarm_lock = swarm.lock().await;
            swarm_lock.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; // IPv4 - TCP Raw
            swarm_lock.listen_on("/ip6/::/tcp/0".parse()?)?; // IPv6 - TCP Raw
        }

        Ok(NestriP2P { swarm })
    }

    pub async fn connect(&self, conn_url: &str) -> Result<NestriConnection, Box<dyn Error>> {
        let conn_addr: Multiaddr = conn_url.parse()?;

        let mut swarm_lock = self.swarm.lock().await;
        swarm_lock.dial(conn_addr.clone())?;

        let Some(Protocol::P2p(peer_id)) = conn_addr.clone().iter().last() else {
            return Err("Invalid connection URL: missing peer ID".into());
        };

        Ok(NestriConnection {
            peer_id,
            control: swarm_lock.behaviour().stream.new_control(),
        })
    }
}
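`connect` dials the multiaddr and then pulls the relay's `PeerId` out of its trailing `/p2p/...` component. The same extraction in isolation, as a sketch (the helper name is ours, not part of the diff):

```rust
use libp2p::{Multiaddr, PeerId, multiaddr::Protocol};

// Hypothetical standalone helper mirroring the extraction inside connect():
// the last multiaddr component must be /p2p/<peer-id>.
fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option<PeerId> {
    match addr.iter().last() {
        Some(Protocol::P2p(peer_id)) => Some(peer_id),
        _ => None,
    }
}
```

Keeping the whole `Swarm` behind one `tokio::sync::Mutex` is what lets `connect` and the event loop below coexist; the trade-off is that a dial briefly contends with `swarm_loop` for the lock.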
async fn swarm_loop(swarm: Arc<Mutex<Swarm<NestriBehaviour>>>) {
    loop {
        let event = {
            let mut swarm_lock = swarm.lock().await;
            swarm_lock.select_next_some().await
        };
        match event {
            SwarmEvent::NewListenAddr { address, .. } => {
                tracing::info!("Listening on: '{}'", address);
            }
            SwarmEvent::ConnectionEstablished { peer_id, .. } => {
                tracing::info!("Connection established with peer: {}", peer_id);
            }
            SwarmEvent::ConnectionClosed { peer_id, cause, .. } => {
                if let Some(err) = cause {
                    tracing::error!(
                        "Connection with peer {} closed due to error: {}",
                        peer_id,
                        err
                    );
                } else {
                    tracing::info!("Connection with peer {} closed", peer_id);
                }
            }
            SwarmEvent::IncomingConnection {
                local_addr,
                send_back_addr,
                ..
            } => {
                tracing::info!(
                    "Incoming connection from: {} (send back to: {})",
                    local_addr,
                    send_back_addr
                );
            }
            SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => {
                if let Some(peer_id) = peer_id {
                    tracing::error!("Failed to connect to peer {}: {}", peer_id, error);
                } else {
                    tracing::error!("Failed to connect: {}", error);
                }
            }
            _ => {}
        }
    }
}
packages/server/src/p2p/p2p_protocol_stream.rs: 149 lines (new file)
@@ -0,0 +1,149 @@
use crate::p2p::p2p::NestriConnection;
use crate::p2p::p2p_safestream::SafeStream;
use libp2p::StreamProtocol;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use tokio::sync::mpsc;

// Cloneable callback type
pub type CallbackInner = dyn Fn(Vec<u8>) + Send + Sync + 'static;
pub struct Callback(Arc<CallbackInner>);
impl Callback {
    pub fn new<F>(f: F) -> Self
    where
        F: Fn(Vec<u8>) + Send + Sync + 'static,
    {
        Callback(Arc::new(f))
    }

    pub fn call(&self, data: Vec<u8>) {
        self.0(data)
    }
}
impl Clone for Callback {
    fn clone(&self) -> Self {
        Callback(Arc::clone(&self.0))
    }
}
impl From<Box<CallbackInner>> for Callback {
    fn from(boxed: Box<CallbackInner>) -> Self {
        Callback(Arc::from(boxed))
    }
}
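`Callback` is a thin `Arc` wrapper, so the read loop below can clone a handler out of the registry and release the lock before invoking it. A minimal usage sketch:

```rust
// Cloning a Callback clones the Arc, not the closure itself.
let cb = Callback::new(|data: Vec<u8>| {
    tracing::debug!("received {} bytes", data.len());
});
let cb2 = cb.clone();
cb2.call(b"hello".to_vec()); // both handles invoke the same closure
```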
/// NestriStreamProtocol manages the stream protocol for Nestri connections.
pub struct NestriStreamProtocol {
    tx: mpsc::Sender<Vec<u8>>,
    safe_stream: Arc<SafeStream>,
    callbacks: Arc<RwLock<HashMap<String, Callback>>>,
}
impl NestriStreamProtocol {
    const NESTRI_PROTOCOL_STREAM_PUSH: StreamProtocol =
        StreamProtocol::new("/nestri-relay/stream-push/1.0.0");

    pub async fn new(
        nestri_connection: NestriConnection,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        let mut nestri_connection = nestri_connection.clone();
        let push_stream = match nestri_connection
            .control
            .open_stream(nestri_connection.peer_id, Self::NESTRI_PROTOCOL_STREAM_PUSH)
            .await
        {
            Ok(stream) => stream,
            Err(e) => {
                return Err(Box::new(e));
            }
        };

        let (tx, rx) = mpsc::channel(1000);

        let sp = NestriStreamProtocol {
            tx,
            safe_stream: Arc::new(SafeStream::new(push_stream)),
            callbacks: Arc::new(RwLock::new(HashMap::new())),
        };

        // Spawn the loops
        sp.spawn_read_loop();
        sp.spawn_write_loop(rx);

        Ok(sp)
    }

    fn spawn_read_loop(&self) -> tokio::task::JoinHandle<()> {
        let safe_stream = self.safe_stream.clone();
        let callbacks = self.callbacks.clone();
        tokio::spawn(async move {
            loop {
                let data = {
                    match safe_stream.receive_raw().await {
                        Ok(data) => data,
                        Err(e) => {
                            tracing::error!("Error receiving data: {}", e);
                            break; // Exit the loop on error
                        }
                    }
                };

                match serde_json::from_slice::<crate::messages::MessageBase>(&data) {
                    Ok(base_message) => {
                        let response_type = base_message.payload_type;
                        let callback = {
                            let callbacks_lock = callbacks.read().unwrap();
                            callbacks_lock.get(&response_type).cloned()
                        };

                        if let Some(callback) = callback {
                            // Call the registered callback with the raw data
                            callback.call(data);
                        } else {
                            tracing::warn!(
                                "No callback registered for response type: {}",
                                response_type
                            );
                        }
                    }
                    Err(e) => {
                        tracing::error!("Failed to decode message: {}", e);
                    }
                }
            }
        })
    }

    fn spawn_write_loop(&self, mut rx: mpsc::Receiver<Vec<u8>>) -> tokio::task::JoinHandle<()> {
        let safe_stream = self.safe_stream.clone();
        tokio::spawn(async move {
            loop {
                // Wait for a message from the channel
                if let Some(tx_data) = rx.recv().await {
                    if let Err(e) = safe_stream.send_raw(&tx_data).await {
                        tracing::error!("Error sending data: {:?}", e);
                    }
                } else {
                    tracing::info!("Receiver closed, exiting write loop");
                    break;
                }
            }
        })
    }

    pub fn send_message<M: serde::Serialize>(
        &self,
        message: &M,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let json_data = serde_json::to_vec(message)?;
        self.tx.try_send(json_data)?;
        Ok(())
    }

    /// Register a callback for a specific response type
    pub fn register_callback<F>(&self, response_type: &str, callback: F)
    where
        F: Fn(Vec<u8>) + Send + Sync + 'static,
    {
        let mut callbacks_lock = self.callbacks.write().unwrap();
        callbacks_lock.insert(response_type.to_string(), Callback::new(callback));
    }
}
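Typical wiring from an established `NestriConnection`, as a sketch (the payload strings mirror the signaller above; the connection value is assumed):

```rust
// Assumes `conn: NestriConnection` was obtained from NestriP2P::connect.
let proto = NestriStreamProtocol::new(conn).await?;

// Dispatch is keyed on the JSON payload_type field.
proto.register_callback("push-stream-ok", |data| {
    tracing::info!("relay acknowledged room push ({} bytes)", data.len());
});

// send_message serializes to JSON and hands the bytes to the write loop;
// try_send fails fast instead of blocking when the 1000-slot channel is full.
let msg = crate::messages::MessageRaw {
    base: crate::messages::MessageBase {
        payload_type: "push-stream-room".to_string(),
        latency: None,
    },
    data: serde_json::Value::from("example-room"),
};
proto.send_message(&msg)?;
```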
packages/server/src/p2p/p2p_safestream.rs: 105 lines (new file)
@@ -0,0 +1,105 @@
use byteorder::{BigEndian, ByteOrder};
use futures_util::io::{ReadHalf, WriteHalf};
use futures_util::{AsyncReadExt, AsyncWriteExt};
use prost::Message;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::sync::Arc;
use tokio::sync::Mutex;

const MAX_SIZE: usize = 1024 * 1024; // 1MB

pub struct SafeStream {
    stream_read: Arc<Mutex<ReadHalf<libp2p::Stream>>>,
    stream_write: Arc<Mutex<WriteHalf<libp2p::Stream>>>,
}
impl SafeStream {
    pub fn new(stream: libp2p::Stream) -> Self {
        let (read, write) = stream.split();
        SafeStream {
            stream_read: Arc::new(Mutex::new(read)),
            stream_write: Arc::new(Mutex::new(write)),
        }
    }

    pub async fn send_json<T: Serialize>(
        &self,
        data: &T,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let json_data = serde_json::to_vec(data)?;
        tracing::info!("Sending JSON");
        let e = self.send_with_length_prefix(&json_data).await;
        tracing::info!("Sent JSON");
        e
    }

    pub async fn receive_json<T: DeserializeOwned>(&self) -> Result<T, Box<dyn std::error::Error>> {
        let data = self.receive_with_length_prefix().await?;
        let msg = serde_json::from_slice(&data)?;
        Ok(msg)
    }

    pub async fn send_proto<M: Message>(&self, msg: &M) -> Result<(), Box<dyn std::error::Error>> {
        let mut proto_data = Vec::new();
        msg.encode(&mut proto_data)?;
        self.send_with_length_prefix(&proto_data).await
    }

    pub async fn receive_proto<M: Message + Default>(
        &self,
    ) -> Result<M, Box<dyn std::error::Error>> {
        let data = self.receive_with_length_prefix().await?;
        let msg = M::decode(&*data)?;
        Ok(msg)
    }

    pub async fn send_raw(&self, data: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
        self.send_with_length_prefix(data).await
    }

    pub async fn receive_raw(&self) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
        self.receive_with_length_prefix().await
    }

    async fn send_with_length_prefix(&self, data: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
        if data.len() > MAX_SIZE {
            return Err(Box::new(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "Data exceeds maximum size",
            )));
        }

        let mut stream_write = self.stream_write.lock().await;

        // Write the 4-byte length prefix
        let mut length_prefix = [0u8; 4];
        BigEndian::write_u32(&mut length_prefix, data.len() as u32);
        stream_write.write_all(&length_prefix).await?;

        // Write the actual data
        stream_write.write_all(data).await?;
        stream_write.flush().await?;
        Ok(())
    }

    async fn receive_with_length_prefix(&self) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
        let mut stream_read = self.stream_read.lock().await;

        // Read the 4-byte length prefix
        let mut length_prefix = [0u8; 4];
        stream_read.read_exact(&mut length_prefix).await?;
        let length = BigEndian::read_u32(&length_prefix) as usize;

        if length > MAX_SIZE {
            return Err(Box::new(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "Data exceeds maximum size",
            )));
        }

        // Read the actual data
        let mut buffer = vec![0; length];
        stream_read.read_exact(&mut buffer).await?;
        Ok(buffer)
    }
}
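Both directions share one framing rule: a 4-byte big-endian length prefix followed by the payload, with anything over `MAX_SIZE` (1 MiB) rejected before it touches the stream. A standalone sketch of the frame layout (helper name ours):

```rust
// Builds the same frame send_with_length_prefix writes:
// [len: u32, big-endian][payload bytes]
fn frame(payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(4 + payload.len());
    out.extend_from_slice(&(payload.len() as u32).to_be_bytes());
    out.extend_from_slice(payload);
    out
}

// frame(b"hi") == [0, 0, 0, 2, 0x68, 0x69]
```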
@@ -1,225 +0,0 @@
use crate::messages::decode_message;
use futures_util::StreamExt;
use futures_util::sink::SinkExt;
use futures_util::stream::{SplitSink, SplitStream};
use std::collections::HashMap;
use std::error::Error;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use tokio::net::TcpStream;
use tokio::sync::{Mutex, Notify, mpsc};
use tokio::time::sleep;
use tokio_tungstenite::tungstenite::{Message, Utf8Bytes};
use tokio_tungstenite::{MaybeTlsStream, WebSocketStream, connect_async};

type Callback = Box<dyn Fn(String) + Send + Sync>;
type WSRead = SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>;
type WSWrite = SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>;

#[derive(Clone)]
pub struct NestriWebSocket {
    ws_url: String,
    reader: Arc<Mutex<Option<WSRead>>>,
    writer: Arc<Mutex<Option<WSWrite>>>,
    callbacks: Arc<RwLock<HashMap<String, Callback>>>,
    message_tx: mpsc::UnboundedSender<String>,
    reconnected_notify: Arc<Notify>,
}
impl NestriWebSocket {
    pub async fn new(ws_url: String) -> Result<NestriWebSocket, Box<dyn Error>> {
        // Attempt to connect to the WebSocket
        let ws_stream = NestriWebSocket::do_connect(&ws_url).await.unwrap();

        // Split the stream into read and write halves
        let (write, read) = ws_stream.split();

        // Create the message channel
        let (message_tx, message_rx) = mpsc::unbounded_channel();

        let ws = NestriWebSocket {
            ws_url,
            reader: Arc::new(Mutex::new(Some(read))),
            writer: Arc::new(Mutex::new(Some(write))),
            callbacks: Arc::new(RwLock::new(HashMap::new())),
            message_tx: message_tx.clone(),
            reconnected_notify: Arc::new(Notify::new()),
        };

        // Spawn the read loop
        ws.spawn_read_loop();
        // Spawn the write loop
        ws.spawn_write_loop(message_rx);

        Ok(ws)
    }

    async fn do_connect(
        ws_url: &str,
    ) -> Result<WebSocketStream<MaybeTlsStream<TcpStream>>, Box<dyn Error + Send + Sync>> {
        loop {
            match connect_async(ws_url).await {
                Ok((ws_stream, _)) => {
                    return Ok(ws_stream);
                }
                Err(e) => {
                    tracing::error!("Failed to connect to WebSocket, retrying: {:?}", e);
                    sleep(Duration::from_secs(3)).await; // Wait before retrying
                }
            }
        }
    }

    // Handles message -> callback calls and reconnects on error/disconnect
    fn spawn_read_loop(&self) {
        let reader = self.reader.clone();
        let callbacks = self.callbacks.clone();
        let self_clone = self.clone();

        tokio::spawn(async move {
            loop {
                // Lock the reader to get the WSRead, then drop the lock
                let ws_read_option = {
                    let mut reader_lock = reader.lock().await;
                    reader_lock.take()
                };

                let mut ws_read = match ws_read_option {
                    Some(ws_read) => ws_read,
                    None => {
                        tracing::error!("Reader is None, cannot proceed");
                        return;
                    }
                };

                while let Some(message_result) = ws_read.next().await {
                    match message_result {
                        Ok(message) => {
                            let data = message
                                .into_text()
                                .expect("failed to turn message into text");
                            let base_message = match decode_message(data.to_string()) {
                                Ok(base_message) => base_message,
                                Err(e) => {
                                    tracing::error!("Failed to decode message: {:?}", e);
                                    continue;
                                }
                            };

                            let callbacks_lock = callbacks.read().unwrap();
                            if let Some(callback) = callbacks_lock.get(&base_message.payload_type) {
                                let data = data.clone();
                                callback(data.to_string());
                            }
                        }
                        Err(e) => {
                            tracing::error!(
                                "Error receiving message: {:?}, reconnecting in 3 seconds...",
                                e
                            );
                            sleep(Duration::from_secs(3)).await;
                            self_clone.reconnect().await.unwrap();
                            break; // Break the inner loop to get a new ws_read
                        }
                    }
                }
                // After reconnection, the loop continues, and we acquire a new ws_read
            }
        });
    }

    fn spawn_write_loop(&self, mut message_rx: mpsc::UnboundedReceiver<String>) {
        let writer = self.writer.clone();
        let self_clone = self.clone();

        tokio::spawn(async move {
            loop {
                // Wait for a message from the channel
                if let Some(message) = message_rx.recv().await {
                    loop {
                        // Acquire the writer lock
                        let mut writer_lock = writer.lock().await;
                        if let Some(writer) = writer_lock.as_mut() {
                            // Try to send the message over the WebSocket
                            match writer
                                .send(Message::Text(Utf8Bytes::from(message.clone())))
                                .await
                            {
                                Ok(_) => {
                                    // Message sent successfully
                                    break;
                                }
                                Err(e) => {
                                    tracing::error!("Error sending message: {:?}", e);
                                    // Attempt to reconnect
                                    if let Err(e) = self_clone.reconnect().await {
                                        tracing::error!("Error during reconnection: {:?}", e);
                                        // Wait before retrying
                                        sleep(Duration::from_secs(3)).await;
                                        continue;
                                    }
                                }
                            }
                        } else {
                            tracing::error!("Writer is None, cannot send message");
                            // Attempt to reconnect
                            if let Err(e) = self_clone.reconnect().await {
                                tracing::error!("Error during reconnection: {:?}", e);
                                // Wait before retrying
                                sleep(Duration::from_secs(3)).await;
                                continue;
                            }
                        }
                    }
                } else {
                    break;
                }
            }
        });
    }

    async fn reconnect(&self) -> Result<(), Box<dyn Error + Send + Sync>> {
        loop {
            match NestriWebSocket::do_connect(&self.ws_url).await {
                Ok(ws_stream) => {
                    let (write, read) = ws_stream.split();
                    {
                        let mut writer_lock = self.writer.lock().await;
                        *writer_lock = Some(write);
                    }
                    {
                        let mut reader_lock = self.reader.lock().await;
                        *reader_lock = Some(read);
                    }
                    // Notify subscribers of successful reconnection
                    self.reconnected_notify.notify_waiters();
                    return Ok(());
                }
                Err(e) => {
                    tracing::error!("Failed to reconnect to WebSocket: {:?}", e);
                    sleep(Duration::from_secs(3)).await; // Wait before retrying
                }
            }
        }
    }

    /// Send a message through the WebSocket
    pub fn send_message(&self, message: String) -> Result<(), Box<dyn Error>> {
        self.message_tx
            .send(message)
            .map_err(|e| format!("Failed to send message: {:?}", e).into())
    }

    /// Register a callback for a specific response type
    pub fn register_callback<F>(&self, response_type: &str, callback: F)
    where
        F: Fn(String) + Send + Sync + 'static,
    {
        let mut callbacks_lock = self.callbacks.write().unwrap();
        callbacks_lock.insert(response_type.to_string(), Box::new(callback));
    }

    /// Subscribe to event for reconnection
    pub fn subscribe_reconnected(&self) -> Arc<Notify> {
        self.reconnected_notify.clone()
    }
}