feat: Migrate from WebSocket to libp2p for peer-to-peer connectivity (#286)

## Description
Whew, some stuff is still not re-implemented, but it's working!

Rabbit's gonna explode with the number of changes, I reckon 😅



<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit

- **New Features**
- Introduced a peer-to-peer relay system using libp2p with enhanced
stream forwarding, room state synchronization, and mDNS peer discovery.
- Added decentralized room and participant management, metrics
publishing, and safe, size-limited, concurrent message streaming with
robust framing and callback dispatching.
- Implemented asynchronous, callback-driven message handling over custom
libp2p streams replacing WebSocket signaling.
- **Improvements**
- Migrated signaling and stream protocols from WebSocket to libp2p,
improving reliability and scalability.
- Simplified configuration and environment variables, removing
deprecated flags and adding persistent data support.
- Enhanced logging, error handling, and connection management for better
observability and robustness.
- Refined RTP header extension registration and NAT IP handling for
improved WebRTC performance.
- **Bug Fixes**
- Improved ICE candidate buffering and SDP negotiation in WebRTC
connections.
  - Fixed NAT IP and UDP port range configuration issues.
- **Refactor**
- Modularized codebase, reorganized relay and server logic, and removed
deprecated WebSocket-based components.
- Streamlined message structures, removed obsolete enums and message
types, and simplified SafeMap concurrency.
- Replaced WebSocket signaling with libp2p stream protocols in server
and relay components.
- **Chores**
- Updated and cleaned dependencies across Go, Rust, and JavaScript
packages.
  - Added `.gitignore` for persistent data directory in relay package.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->

---------

Co-authored-by: DatCaptainHorse <DatCaptainHorse@users.noreply.github.com>
Co-authored-by: Philipp Neumann <3daquawolf@gmail.com>
This commit is contained in:
Kristian Ollikainen
2025-06-06 16:48:49 +03:00
committed by GitHub
parent e67a8d2b32
commit 6e82eff9e2
48 changed files with 4741 additions and 2787 deletions

View File

@@ -0,0 +1,13 @@
package core
import "time"
// --- Constants ---
const (
	// PubSub topic names; these must match across every relay in the mesh
	// for state and metrics messages to be delivered.
	roomStateTopicName    = "room-states"
	relayMetricsTopicName = "relay-metrics"
	// Timers and Intervals
	metricsPublishInterval = 15 * time.Second // How often to publish own metrics
)

View File

@@ -0,0 +1,214 @@
package core
import (
"context"
"crypto/ed25519"
"fmt"
"log/slog"
"os"
"relay/internal/common"
"relay/internal/shared"
"time"
"github.com/libp2p/go-libp2p"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
"github.com/multiformats/go-multiaddr"
"github.com/oklog/ulid/v2"
"github.com/pion/webrtc/v4"
)
// -- Variables --
// globalRelay is the process-wide Relay instance, assigned by InitRelay.
var globalRelay *Relay
// -- Structs --
// RelayInfo contains light information of a Relay in a mesh-friendly format;
// it is the snapshot that gets JSON-marshaled onto the relay metrics topic.
type RelayInfo struct {
	ID            peer.ID                                  // libp2p peer ID of this relay
	MeshAddrs     []string                                 // Addresses of this relay
	MeshRooms     *common.SafeMap[string, shared.RoomInfo] // Rooms hosted by this relay
	MeshLatencies *common.SafeMap[string, time.Duration]   // Latencies to other peers from this relay
}
// Relay is the central node structure enhanced with metrics and state: it
// owns the libp2p host, PubSub synchronization, protocol handlers, and all
// locally-hosted rooms and mesh peer connections.
type Relay struct {
	RelayInfo // mesh-visible identity and state of this relay

	Host        host.Host         // libp2p host for peer-to-peer networking
	PubSub      *pubsub.PubSub    // PubSub for state synchronization
	PingService *ping.PingService // used to measure latency to mesh peers

	// Local
	LocalRooms           *common.SafeMap[ulid.ULID, *shared.Room]         // room ID -> local Room struct (hosted by this relay)
	LocalMeshPeers       *common.SafeMap[peer.ID, *RelayInfo]             // peer ID -> mesh peer relay info (connected to this relay)
	LocalMeshConnections *common.SafeMap[peer.ID, *webrtc.PeerConnection] // peer ID -> PeerConnection (connected to this relay)

	// Protocols
	ProtocolRegistry

	// PubSub Topics
	pubTopicState        *pubsub.Topic // topic for room states
	pubTopicRelayMetrics *pubsub.Topic // topic for relay metrics/status
}
// NewRelay constructs a Relay: it creates a libp2p host listening on raw TCP
// and WebSocket transports (IPv4 + IPv6) on the given port, sets up GossipSub,
// the ping service, the PubSub topics, mDNS discovery, and starts the
// background metrics publisher.
func NewRelay(ctx context.Context, port int, identityKey crypto.PrivKey) (*Relay, error) {
	listenAddrs := []string{
		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port),    // IPv4 - Raw TCP
		fmt.Sprintf("/ip6/::/tcp/%d", port),         // IPv6 - Raw TCP
		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/ws", port), // IPv4 - TCP WebSocket
		fmt.Sprintf("/ip6/::/tcp/%d/ws", port),      // IPv6 - TCP WebSocket
	}
	var muAddrs []multiaddr.Multiaddr
	for _, addr := range listenAddrs {
		multiAddr, err := multiaddr.NewMultiaddr(addr)
		if err != nil {
			return nil, fmt.Errorf("failed to parse multiaddr '%s': %w", addr, err)
		}
		muAddrs = append(muAddrs, multiAddr)
	}
	// Initialize libp2p host
	p2pHost, err := libp2p.New(
		// TODO: Currently static identity
		libp2p.Identity(identityKey),
		// Enable required transports
		libp2p.Transport(tcp.NewTCPTransport),
		libp2p.Transport(ws.New),
		// Other options
		libp2p.ListenAddrs(muAddrs...),
		libp2p.Security(noise.ID, noise.New),
		libp2p.EnableRelay(),
		libp2p.EnableHolePunching(),
		libp2p.EnableNATService(),
		libp2p.EnableAutoNATv2(),
		libp2p.ShareTCPListener(),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create libp2p host for relay: %w", err)
	}
	// Set up pubsub
	p2pPubsub, err := pubsub.NewGossipSub(ctx, p2pHost)
	if err != nil {
		return nil, fmt.Errorf("failed to create pubsub: %w, addrs: %v", err, p2pHost.Addrs())
	}
	// Initialize Ping Service
	pingSvc := ping.NewPingService(p2pHost)
	var addresses []string
	for _, addr := range p2pHost.Addrs() {
		addresses = append(addresses, addr.String())
	}
	r := &Relay{
		RelayInfo: RelayInfo{
			ID:            p2pHost.ID(),
			MeshAddrs:     addresses,
			MeshRooms:     common.NewSafeMap[string, shared.RoomInfo](),
			MeshLatencies: common.NewSafeMap[string, time.Duration](),
		},
		Host:           p2pHost,
		PubSub:         p2pPubsub,
		PingService:    pingSvc,
		LocalRooms:     common.NewSafeMap[ulid.ULID, *shared.Room](),
		LocalMeshPeers: common.NewSafeMap[peer.ID, *RelayInfo](),
		// BUG FIX: LocalMeshConnections was never initialized, leaving a nil
		// SafeMap that would panic on first use.
		LocalMeshConnections: common.NewSafeMap[peer.ID, *webrtc.PeerConnection](),
	}
	// Add network notifier after relay is initialized
	p2pHost.Network().Notify(&networkNotifier{relay: r})
	// Set up PubSub topics and handlers
	if err = r.setupPubSub(ctx); err != nil {
		// BUG FIX: previously `err = p2pHost.Close()` overwrote the setup
		// error before wrapping it, so on a successful Close the returned
		// error wrapped nil. Keep the Close error separate.
		if closeErr := p2pHost.Close(); closeErr != nil {
			slog.Error("Failed to close host after PubSub setup failure", "err", closeErr)
		}
		return nil, fmt.Errorf("failed to setup PubSub: %w", err)
	}
	// Initialize Protocol Registry
	r.ProtocolRegistry = NewProtocolRegistry(r)
	// Start discovery features
	if err = startMDNSDiscovery(r); err != nil {
		slog.Warn("Failed to initialize mDNS discovery, continuing without..", "error", err)
	}
	// Start background tasks
	go r.periodicMetricsPublisher(ctx)
	printConnectInstructions(p2pHost)
	return r, nil
}
// InitRelay loads (or generates and persists) the relay's ed25519 identity
// key, creates the global Relay instance on the configured endpoint port, and
// initializes the WebRTC API.
//
// ctxCancel is currently unused; it is kept to preserve the call signature.
func InitRelay(ctx context.Context, ctxCancel context.CancelFunc) error {
	var err error
	persistentDir := common.GetFlags().PersistDir
	// The identity key lives inside the persistent data directory.
	identityPath := persistentDir + "/identity.key"
	// Load or generate identity key
	var identityKey crypto.PrivKey
	var privKey ed25519.PrivateKey
	// Reuse a stored identity only when a persist dir is configured and the
	// user did not explicitly request regeneration.
	hasIdentity := len(persistentDir) > 0 && !common.GetFlags().RegenIdentity
	if hasIdentity {
		_, err = os.Stat(identityPath)
		if err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("failed to check identity key file: %w", err)
		} else if os.IsNotExist(err) {
			hasIdentity = false
		}
	}
	if !hasIdentity {
		// Make sure the persistent directory exists
		if err = os.MkdirAll(persistentDir, 0700); err != nil {
			return fmt.Errorf("failed to create persistent data directory: %w", err)
		}
		// Generate
		slog.Info("Generating new identity for relay")
		privKey, err = common.GenerateED25519Key()
		if err != nil {
			return fmt.Errorf("failed to generate new identity: %w", err)
		}
		// Save the key
		if err = common.SaveED25519Key(privKey, identityPath); err != nil {
			return fmt.Errorf("failed to save identity key: %w", err)
		}
		slog.Info("New identity generated and saved", "path", identityPath)
	} else {
		slog.Info("Loading existing identity for relay", "path", identityPath)
		// Load the key
		privKey, err = common.LoadED25519Key(identityPath)
		if err != nil {
			return fmt.Errorf("failed to load identity key: %w", err)
		}
	}
	// Convert to libp2p crypto.PrivKey
	identityKey, err = crypto.UnmarshalEd25519PrivateKey(privKey)
	if err != nil {
		return fmt.Errorf("failed to unmarshal ED25519 private key: %w", err)
	}
	globalRelay, err = NewRelay(ctx, common.GetFlags().EndpointPort, identityKey)
	if err != nil {
		return fmt.Errorf("failed to create relay: %w", err)
	}
	if err = common.InitWebRTCAPI(); err != nil {
		return err
	}
	slog.Info("Relay initialized", "id", globalRelay.ID)
	return nil
}

View File

@@ -0,0 +1,38 @@
package core
import (
"context"
"fmt"
"log/slog"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/discovery/mdns"
)
const (
	// mdnsDiscoveryRendezvous is the shared service tag for mDNS discovery;
	// every relay must advertise the same string to find its peers.
	mdnsDiscoveryRendezvous = "/nestri-relay/mdns-discovery/1.0.0" // Shared string for mDNS discovery
)
// discoveryNotifee receives peer-found notifications from the mDNS service
// and forwards them to the relay so it can connect to discovered peers.
type discoveryNotifee struct {
	relay *Relay
}
// HandlePeerFound implements the mdns notifee interface: whenever a peer
// advertising our rendezvous string appears, attempt a mesh connection to it.
func (d *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
	if d.relay == nil {
		return
	}
	if err := d.relay.connectToRelay(context.Background(), &pi); err != nil {
		slog.Error("failed to connect to discovered relay", "peer", pi.ID, "error", err)
	}
}
// startMDNSDiscovery begins advertising and browsing for other relays on the
// local network via mDNS, wiring discovered peers into the given relay.
func startMDNSDiscovery(relay *Relay) error {
	notifee := &discoveryNotifee{relay: relay}
	svc := mdns.NewMdnsService(relay.Host, mdnsDiscoveryRendezvous, notifee)
	if err := svc.Start(); err != nil {
		return fmt.Errorf("failed to start mDNS discovery: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,128 @@
package core
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/peer"
)
// --- Metrics Collection and Publishing ---
// periodicMetricsPublisher publishes this relay's metrics immediately on
// start and then on every tick of metricsPublishInterval, until ctx is
// canceled.
func (r *Relay) periodicMetricsPublisher(ctx context.Context) {
	ticker := time.NewTicker(metricsPublishInterval)
	defer ticker.Stop()
	// First publish happens right away so peers don't wait a full interval.
	if err := r.publishRelayMetrics(ctx); err != nil {
		slog.Error("Failed to publish initial relay metrics", "err", err)
	}
	for {
		select {
		case <-ticker.C:
			if err := r.publishRelayMetrics(ctx); err != nil {
				slog.Error("Failed to publish relay metrics", "err", err)
			}
		case <-ctx.Done():
			slog.Info("Stopping metrics publisher")
			return
		}
	}
}
// publishRelayMetrics refreshes peer latencies and broadcasts this relay's
// RelayInfo on the metrics topic. A publish failure is only logged (not
// returned) so a transient PubSub hiccup doesn't abort the publisher loop.
func (r *Relay) publishRelayMetrics(ctx context.Context) error {
	if r.pubTopicRelayMetrics == nil {
		slog.Warn("Cannot publish relay metrics: topic is nil")
		return nil
	}
	// Refresh latency measurements before snapshotting state.
	r.checkAllPeerLatencies(ctx)
	payload, err := json.Marshal(r.RelayInfo)
	if err != nil {
		return fmt.Errorf("failed to marshal relay status: %w", err)
	}
	if pubErr := r.pubTopicRelayMetrics.Publish(ctx, payload); pubErr != nil {
		// Don't return error on publish failure, just log
		slog.Error("Failed to publish relay metrics message", "err", pubErr)
	}
	return nil
}
// checkAllPeerLatencies measures latency to all currently connected peers.
// Measurements run concurrently; the call blocks until every one finishes.
func (r *Relay) checkAllPeerLatencies(ctx context.Context) {
	var wg sync.WaitGroup
	for _, p := range r.Host.Network().Peers() {
		if p == r.ID {
			continue // Skip self
		}
		wg.Add(1)
		go func(peerID peer.ID) {
			defer wg.Done()
			// BUG FIX: this previously spawned ANOTHER goroutine
			// (`go r.measureLatencyToPeer(...)`), so wg.Done fired
			// immediately and Wait() returned before any measurement
			// completed. Run the measurement synchronously here instead.
			r.measureLatencyToPeer(ctx, peerID)
		}(p)
	}
	wg.Wait() // Wait for all latency checks to complete
}
// measureLatencyToPeer pings a specific peer and updates the local latency map.
func (r *Relay) measureLatencyToPeer(ctx context.Context, peerID peer.ID) {
// Check peer status first
if !r.hasConnectedPeer(peerID) {
return
}
// Create a context for the ping operation
pingCtx, cancel := context.WithCancel(ctx)
defer cancel()
// Use the PingService instance stored in the Relay struct
if r.PingService == nil {
slog.Error("PingService is nil, cannot measure latency", "peer", peerID)
return
}
resultsCh := r.PingService.Ping(pingCtx, peerID)
// Wait for the result (or timeout)
select {
case <-pingCtx.Done():
// Ping timed out
slog.Warn("Latency check canceled", "peer", peerID, "err", pingCtx.Err())
case result, ok := <-resultsCh:
if !ok {
// Channel closed unexpectedly
slog.Warn("Ping service channel closed unexpectedly", "peer", peerID)
return
}
// Received ping result
if result.Error != nil {
slog.Warn("Latency check failed, removing peer from local peers map", "peer", peerID, "err", result.Error)
// Remove from MeshPeers if ping failed
if r.LocalMeshPeers.Has(peerID) {
r.LocalMeshPeers.Delete(peerID)
}
return
}
// Ping successful, update latency
latency := result.RTT
// Ensure latency is not zero if successful, assign a minimal value if so.
// Sometimes RTT can be reported as 0 for very fast local connections.
if latency <= 0 {
latency = 1 * time.Microsecond
}
r.RelayInfo.MeshLatencies.Set(peerID.String(), latency)
}
}

View File

@@ -0,0 +1,128 @@
package core
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
)
// --- Structs ---
// networkNotifier implements the libp2p network notifiee callbacks; it logs
// connection events and updates relay state on connect/disconnect.
type networkNotifier struct {
	relay *Relay
}
// Connected is called when a connection is established; it notifies the
// relay so peer state can be set up.
func (n *networkNotifier) Connected(net network.Network, conn network.Conn) {
	// BUG FIX: the nil check was inverted (`== nil`), which both skipped the
	// notification whenever the relay was valid and would have dereferenced
	// a nil relay otherwise. Mirror Disconnected's `!= nil` guard.
	if n.relay != nil {
		n.relay.onPeerConnected(conn.RemotePeer())
	}
}
// Disconnected is called when a connection is terminated; it notifies the
// relay so the departed peer's state can be cleaned up.
func (n *networkNotifier) Disconnected(net network.Network, conn network.Conn) {
	if n.relay == nil {
		return
	}
	n.relay.onPeerDisconnected(conn.RemotePeer())
}
// Listen is called when the node starts listening on an address (no-op).
func (n *networkNotifier) Listen(net network.Network, addr multiaddr.Multiaddr) {}
// ListenClose is called when the node stops listening on an address (no-op).
func (n *networkNotifier) ListenClose(net network.Network, addr multiaddr.Multiaddr) {}
// --- PubSub Setup ---
// setupPubSub joins the mesh PubSub topics (room state and relay metrics)
// and launches a background handler goroutine for each subscription.
func (r *Relay) setupPubSub(ctx context.Context) error {
	// Room state topic + subscription
	stateTopic, err := r.PubSub.Join(roomStateTopicName)
	if err != nil {
		return fmt.Errorf("failed to join room state topic '%s': %w", roomStateTopicName, err)
	}
	r.pubTopicState = stateTopic
	stateSub, err := stateTopic.Subscribe()
	if err != nil {
		return fmt.Errorf("failed to subscribe to room state topic '%s': %w", roomStateTopicName, err)
	}
	go r.handleRoomStateMessages(ctx, stateSub) // Handler in relay_state.go

	// Relay metrics topic + subscription
	metricsTopic, err := r.PubSub.Join(relayMetricsTopicName)
	if err != nil {
		return fmt.Errorf("failed to join relay metrics topic '%s': %w", relayMetricsTopicName, err)
	}
	r.pubTopicRelayMetrics = metricsTopic
	metricsSub, err := metricsTopic.Subscribe()
	if err != nil {
		return fmt.Errorf("failed to subscribe to relay metrics topic '%s': %w", relayMetricsTopicName, err)
	}
	go r.handleRelayMetricsMessages(ctx, metricsSub) // Handler in relay_state.go

	slog.Info("PubSub topics joined and subscriptions started")
	return nil
}
// --- Connection Management ---
// connectToRelay is the internal helper that dials another relay given its
// peer info, refusing self-connections and bounding the attempt at 15s.
func (r *Relay) connectToRelay(ctx context.Context, peerInfo *peer.AddrInfo) error {
	if peerInfo.ID == r.ID {
		return errors.New("cannot connect to self")
	}
	// Bound the connection attempt so a dead peer cannot stall us.
	connectCtx, cancel := context.WithTimeout(ctx, 15*time.Second) // 15s timeout
	defer cancel()
	slog.Info("Attempting to connect to peer", "peer", peerInfo.ID, "addrs", peerInfo.Addrs)
	err := r.Host.Connect(connectCtx, *peerInfo)
	if err != nil {
		return fmt.Errorf("failed to connect to %s: %w", peerInfo.ID, err)
	}
	slog.Info("Successfully connected to peer", "peer", peerInfo.ID, "addrs", peerInfo.Addrs)
	return nil
}
// ConnectToRelay connects to another relay identified by a full p2p
// multiaddress string (including the /p2p/<peer-id> component).
func (r *Relay) ConnectToRelay(ctx context.Context, addr string) error {
	maddr, err := multiaddr.NewMultiaddr(addr)
	if err != nil {
		return fmt.Errorf("invalid multiaddress: %w", err)
	}
	info, err := peer.AddrInfoFromP2pAddr(maddr)
	if err != nil {
		return fmt.Errorf("failed to extract peer info: %w", err)
	}
	return r.connectToRelay(ctx, info)
}
// printConnectInstructions logs the full p2p multiaddresses other relays can
// use to connect to this host.
func printConnectInstructions(p2pHost host.Host) {
	info := peer.AddrInfo{
		ID:    p2pHost.ID(),
		Addrs: p2pHost.Addrs(),
	}
	fullAddrs, err := peer.AddrInfoToP2pAddrs(&info)
	if err != nil {
		slog.Error("Failed to convert peer info to addresses", "err", err)
		return
	}
	slog.Info("Mesh connection addresses:")
	for _, a := range fullAddrs {
		slog.Info(fmt.Sprintf("> %s", a.String()))
	}
}

View File

@@ -0,0 +1,694 @@
package core
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"relay/internal/common"
"relay/internal/connections"
"relay/internal/shared"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pion/rtp"
"github.com/pion/webrtc/v4"
)
// TODO:s
// TODO: When disconnecting with stream open, causes crash on requester
// TODO: Need to trigger stream request if remote room is online and there are participants in local waiting
// TODO: Cleanup local room state when stream is closed upstream
// --- Protocol IDs ---
const (
	// libp2p stream protocol IDs; both sides of a mesh connection must agree
	// on these versioned identifiers.
	protocolStreamRequest = "/nestri-relay/stream-request/1.0.0" // For requesting a stream from relay
	protocolStreamPush    = "/nestri-relay/stream-push/1.0.0"    // For pushing a stream to relay
)
// --- Protocol Types ---
// StreamConnection is a connection between two relays for the stream
// protocol, pairing a WebRTC PeerConnection with its relay data channel.
type StreamConnection struct {
	pc  *webrtc.PeerConnection         // media + data transport
	ndc *connections.NestriDataChannel // "relay-data" channel; may be nil until it opens
}
// StreamProtocol deals with meshed stream forwarding between relays,
// tracking connections by who initiated them.
type StreamProtocol struct {
	relay          *Relay
	servedConns    *common.SafeMap[peer.ID, *StreamConnection] // peer ID -> StreamConnection (for served streams)
	incomingConns  *common.SafeMap[string, *StreamConnection]  // room name -> StreamConnection (for incoming pushed streams)
	requestedConns *common.SafeMap[string, *StreamConnection]  // room name -> StreamConnection (for requested streams from other relays)
}
// NewStreamProtocol creates the stream-forwarding protocol handler and
// registers its request/push stream handlers on the relay's libp2p host.
func NewStreamProtocol(relay *Relay) *StreamProtocol {
	sp := &StreamProtocol{
		relay:          relay,
		servedConns:    common.NewSafeMap[peer.ID, *StreamConnection](),
		incomingConns:  common.NewSafeMap[string, *StreamConnection](),
		requestedConns: common.NewSafeMap[string, *StreamConnection](),
	}
	sp.relay.Host.SetStreamHandler(protocolStreamRequest, sp.handleStreamRequest)
	sp.relay.Host.SetStreamHandler(protocolStreamPush, sp.handleStreamPush)
	return sp
}
// --- Protocol Stream Handlers ---
// handleStreamRequest serves a request from another relay for a stream
// hosted locally. It runs a JSON message loop over the libp2p stream,
// negotiating a WebRTC PeerConnection (we send the offer; the requester
// answers and trickles ICE) that carries the room's audio/video tracks and a
// "relay-data" DataChannel used to forward input back upstream.
func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
	brw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
	safeBRW := common.NewSafeBufioRW(brw)
	// ICE candidates received before the remote description is set are
	// buffered here and flushed afterwards.
	iceHolder := make([]webrtc.ICECandidateInit, 0)
	for {
		data, err := safeBRW.Receive()
		if err != nil {
			if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) {
				slog.Debug("Stream request connection closed by peer", "peer", stream.Conn().RemotePeer())
				return
			}
			slog.Error("Failed to receive data", "err", err)
			_ = stream.Reset()
			return
		}
		var baseMsg connections.MessageBase
		if err = json.Unmarshal(data, &baseMsg); err != nil {
			slog.Error("Failed to unmarshal base message", "err", err)
			continue
		}
		switch baseMsg.Type {
		case "request-stream-room":
			var rawMsg connections.MessageRaw
			if err = json.Unmarshal(data, &rawMsg); err != nil {
				slog.Error("Failed to unmarshal raw message for room stream request", "err", err)
				continue
			}
			var roomName string
			if err = json.Unmarshal(rawMsg.Data, &roomName); err != nil {
				slog.Error("Failed to unmarshal room name from raw message", "err", err)
				continue
			}
			slog.Info("Received stream request for room", "room", roomName)
			room := sp.relay.GetRoomByName(roomName)
			if room == nil || !room.IsOnline() || room.OwnerID != sp.relay.ID {
				// TODO: Allow forward requests to other relays from here?
				slog.Debug("Cannot provide stream for nil, offline or non-owned room", "room", roomName, "is_online", room != nil && room.IsOnline(), "is_owner", room != nil && room.OwnerID == sp.relay.ID)
				// Respond with "request-stream-offline" message with room name
				// TODO: Store the peer and send "online" message when the room comes online
				roomNameData, err := json.Marshal(roomName)
				if err != nil {
					slog.Error("Failed to marshal room name for request stream offline", "room", roomName, "err", err)
					continue
				}
				if err = safeBRW.SendJSON(connections.NewMessageRaw(
					"request-stream-offline",
					roomNameData,
				)); err != nil {
					slog.Error("Failed to send request stream offline message", "room", roomName, "err", err)
				}
				continue
			}
			pc, err := common.CreatePeerConnection(func() {
				slog.Info("PeerConnection closed for requested stream", "room", roomName)
				// Cleanup the stream connection
				if ok := sp.servedConns.Has(stream.Conn().RemotePeer()); ok {
					sp.servedConns.Delete(stream.Conn().RemotePeer())
				}
			})
			if err != nil {
				slog.Error("Failed to create PeerConnection for requested stream", "room", roomName, "err", err)
				continue
			}
			// Attach the room's media tracks, when present.
			if room.AudioTrack != nil {
				if _, err = pc.AddTrack(room.AudioTrack); err != nil {
					slog.Error("Failed to add audio track for requested stream", "room", roomName, "err", err)
					continue
				}
			}
			if room.VideoTrack != nil {
				if _, err = pc.AddTrack(room.VideoTrack); err != nil {
					slog.Error("Failed to add video track for requested stream", "room", roomName, "err", err)
					continue
				}
			}
			// DataChannel setup: ordered delivery with bounded retransmits.
			settingOrdered := true
			settingMaxRetransmits := uint16(2)
			dc, err := pc.CreateDataChannel("relay-data", &webrtc.DataChannelInit{
				Ordered:        &settingOrdered,
				MaxRetransmits: &settingMaxRetransmits,
			})
			if err != nil {
				slog.Error("Failed to create DataChannel for requested stream", "room", roomName, "err", err)
				continue
			}
			ndc := connections.NewNestriDataChannel(dc)
			ndc.RegisterOnOpen(func() {
				slog.Debug("Relay DataChannel opened for requested stream", "room", roomName)
			})
			ndc.RegisterOnClose(func() {
				slog.Debug("Relay DataChannel closed for requested stream", "room", roomName)
			})
			ndc.RegisterMessageCallback("input", func(data []byte) {
				if room.DataChannel != nil {
					// BUG FIX: use a callback-local error variable; this
					// callback fires on another goroutine, and assigning to
					// the message loop's `err` was a data race.
					if sendErr := room.DataChannel.SendBinary(data); sendErr != nil {
						slog.Error("Failed to forward input message from mesh to upstream room", "room", roomName, "err", sendErr)
					}
				}
			})
			// ICE candidate handling (trickled to the requesting relay).
			pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
				if candidate == nil {
					return
				}
				// BUG FIX: local error variable, same data-race reason as above.
				if sendErr := safeBRW.SendJSON(connections.NewMessageICE("ice-candidate", candidate.ToJSON())); sendErr != nil {
					slog.Error("Failed to send ICE candidate message for requested stream", "room", roomName, "err", sendErr)
					return
				}
			})
			// Create and send our offer.
			offer, err := pc.CreateOffer(nil)
			if err != nil {
				slog.Error("Failed to create offer for requested stream", "room", roomName, "err", err)
				continue
			}
			if err = pc.SetLocalDescription(offer); err != nil {
				slog.Error("Failed to set local description for requested stream", "room", roomName, "err", err)
				continue
			}
			if err = safeBRW.SendJSON(connections.NewMessageSDP("offer", offer)); err != nil {
				slog.Error("Failed to send offer for requested stream", "room", roomName, "err", err)
				continue
			}
			// Store the connection
			sp.servedConns.Set(stream.Conn().RemotePeer(), &StreamConnection{
				pc:  pc,
				ndc: ndc,
			})
			slog.Debug("Sent offer for requested stream")
		case "ice-candidate":
			var iceMsg connections.MessageICE
			if err := json.Unmarshal(data, &iceMsg); err != nil {
				slog.Error("Failed to unmarshal ICE message", "err", err)
				continue
			}
			if conn, ok := sp.servedConns.Get(stream.Conn().RemotePeer()); ok && conn.pc.RemoteDescription() != nil {
				if err := conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
					slog.Error("Failed to add ICE candidate", "err", err)
				}
				// Flush candidates buffered before the remote description was set.
				for _, heldIce := range iceHolder {
					if err := conn.pc.AddICECandidate(heldIce); err != nil {
						slog.Error("Failed to add held ICE candidate", "err", err)
					}
				}
				// Clear the held candidates
				iceHolder = make([]webrtc.ICECandidateInit, 0)
			} else {
				// Hold the candidate until remote description is set
				iceHolder = append(iceHolder, iceMsg.Candidate)
			}
		case "answer":
			var answerMsg connections.MessageSDP
			if err := json.Unmarshal(data, &answerMsg); err != nil {
				slog.Error("Failed to unmarshal answer from signaling message", "err", err)
				continue
			}
			if conn, ok := sp.servedConns.Get(stream.Conn().RemotePeer()); ok {
				if err := conn.pc.SetRemoteDescription(answerMsg.SDP); err != nil {
					slog.Error("Failed to set remote description for answer", "err", err)
					continue
				}
				slog.Debug("Set remote description for answer")
			} else {
				slog.Warn("Received answer without active PeerConnection")
			}
		}
	}
}
// requestStream manages the internals of a stream request: it sends the room
// name to the remote relay, then acts as the answering side of the WebRTC
// negotiation, forwarding any received tracks into the local room. Signaling
// (offer + ICE) is consumed on a background goroutine; the function returns
// once the request message has been sent.
func (sp *StreamProtocol) requestStream(stream network.Stream, room *shared.Room) error {
	brw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
	safeBRW := common.NewSafeBufioRW(brw)
	slog.Debug("Requesting room stream from peer", "room", room.Name, "peer", stream.Conn().RemotePeer())
	// Send room name to the remote peer
	roomData, err := json.Marshal(room.Name)
	if err != nil {
		_ = stream.Close()
		return fmt.Errorf("failed to marshal room name: %w", err)
	}
	if err = safeBRW.SendJSON(connections.NewMessageRaw(
		"request-stream-room",
		roomData,
	)); err != nil {
		_ = stream.Close()
		return fmt.Errorf("failed to send room request: %w", err)
	}
	pc, err := common.CreatePeerConnection(func() {
		slog.Info("Relay PeerConnection closed for requested stream", "room", room.Name)
		_ = stream.Close() // ignore error as may be closed already
		// Cleanup the stream connection
		if ok := sp.requestedConns.Has(room.Name); ok {
			sp.requestedConns.Delete(room.Name)
		}
	})
	if err != nil {
		_ = stream.Close()
		return fmt.Errorf("failed to create PeerConnection: %w", err)
	}
	pc.OnTrack(func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver) {
		// BUG FIX: the error from NewTrackLocalStaticRTP was silently
		// discarded; a nil local track would crash the RTP copy loop below.
		localTrack, trackErr := webrtc.NewTrackLocalStaticRTP(track.Codec().RTPCodecCapability, track.ID(), "relay-"+room.Name+"-"+track.Kind().String())
		if trackErr != nil {
			slog.Error("Failed to create local track for requested stream", "room", room.Name, "err", trackErr)
			return
		}
		slog.Debug("Received track for requested stream", "room", room.Name, "track_kind", track.Kind().String())
		room.SetTrack(track.Kind(), localTrack)
		// Pump RTP packets from the remote track into the local one until EOF
		// or the local pipe closes.
		go func() {
			for {
				rtpPacket, _, readErr := track.ReadRTP()
				if readErr != nil {
					if !errors.Is(readErr, io.EOF) {
						slog.Error("Failed to read RTP packet for requested stream room", "room", room.Name, "err", readErr)
					}
					break
				}
				if writeErr := localTrack.WriteRTP(rtpPacket); writeErr != nil && !errors.Is(writeErr, io.ErrClosedPipe) {
					slog.Error("Failed to write RTP to local track for requested stream room", "room", room.Name, "err", writeErr)
					break
				}
			}
		}()
	})
	pc.OnDataChannel(func(dc *webrtc.DataChannel) {
		ndc := connections.NewNestriDataChannel(dc)
		ndc.RegisterOnOpen(func() {
			slog.Debug("Relay DataChannel opened for requested stream", "room", room.Name)
		})
		ndc.RegisterOnClose(func() {
			slog.Debug("Relay DataChannel closed for requested stream", "room", room.Name)
		})
		// Set the DataChannel in the requestedConns map
		if conn, ok := sp.requestedConns.Get(room.Name); ok {
			conn.ndc = ndc
		} else {
			sp.requestedConns.Set(room.Name, &StreamConnection{
				pc:  pc,
				ndc: ndc,
			})
		}
		// We do not handle any messages from upstream here
	})
	pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
		if candidate == nil {
			return
		}
		// BUG FIX: use a callback-local error variable; this callback runs
		// on another goroutine and assigning to the enclosing function's
		// `err` was a data race.
		if sendErr := safeBRW.SendJSON(connections.NewMessageICE(
			"ice-candidate",
			candidate.ToJSON(),
		)); sendErr != nil {
			slog.Error("Failed to send ICE candidate message for requested stream", "room", room.Name, "err", sendErr)
			return
		}
	})
	// Handle incoming signaling messages (offer and candidates) in the background.
	go func() {
		// ICE candidates that arrive before the remote description is set.
		iceHolder := make([]webrtc.ICECandidateInit, 0)
		for {
			data, err := safeBRW.Receive()
			if err != nil {
				if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) {
					slog.Debug("Connection for requested stream closed by peer", "room", room.Name)
					return
				}
				slog.Error("Failed to receive data for requested stream", "room", room.Name, "err", err)
				_ = stream.Reset()
				return
			}
			var baseMsg connections.MessageBase
			if err = json.Unmarshal(data, &baseMsg); err != nil {
				slog.Error("Failed to unmarshal base message for requested stream", "room", room.Name, "err", err)
				return
			}
			switch baseMsg.Type {
			case "ice-candidate":
				var iceMsg connections.MessageICE
				if err = json.Unmarshal(data, &iceMsg); err != nil {
					slog.Error("Failed to unmarshal ICE candidate for requested stream", "room", room.Name, "err", err)
					continue
				}
				if conn, ok := sp.requestedConns.Get(room.Name); ok && conn.pc.RemoteDescription() != nil {
					if err = conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
						slog.Error("Failed to add ICE candidate for requested stream", "room", room.Name, "err", err)
					}
					// Add held candidates
					for _, heldCandidate := range iceHolder {
						if err = conn.pc.AddICECandidate(heldCandidate); err != nil {
							slog.Error("Failed to add held ICE candidate for requested stream", "room", room.Name, "err", err)
						}
					}
					// Clear the held candidates
					iceHolder = make([]webrtc.ICECandidateInit, 0)
				} else {
					// Hold the candidate until remote description is set
					iceHolder = append(iceHolder, iceMsg.Candidate)
				}
			case "offer":
				var offerMsg connections.MessageSDP
				if err = json.Unmarshal(data, &offerMsg); err != nil {
					slog.Error("Failed to unmarshal offer for requested stream", "room", room.Name, "err", err)
					continue
				}
				if err = pc.SetRemoteDescription(offerMsg.SDP); err != nil {
					slog.Error("Failed to set remote description for requested stream", "room", room.Name, "err", err)
					continue
				}
				answer, err := pc.CreateAnswer(nil)
				if err != nil {
					slog.Error("Failed to create answer for requested stream", "room", room.Name, "err", err)
					if err = stream.Reset(); err != nil {
						slog.Error("Failed to reset stream for requested stream", "err", err)
					}
					return
				}
				if err = pc.SetLocalDescription(answer); err != nil {
					slog.Error("Failed to set local description for requested stream", "room", room.Name, "err", err)
					if err = stream.Reset(); err != nil {
						slog.Error("Failed to reset stream for requested stream", "err", err)
					}
					return
				}
				if err = safeBRW.SendJSON(connections.NewMessageSDP(
					"answer",
					answer,
				)); err != nil {
					slog.Error("Failed to send answer for requested stream", "room", room.Name, "err", err)
					continue
				}
				// Store the connection (ndc is filled in by OnDataChannel later)
				sp.requestedConns.Set(room.Name, &StreamConnection{
					pc:  pc,
					ndc: nil,
				})
				slog.Debug("Sent answer for requested stream", "room", room.Name)
			default:
				slog.Warn("Unknown signaling message type", "room", room.Name, "type", baseMsg.Type)
			}
		}
	}()
	return nil
}
// handleStreamPush manages a stream push from a node (nestri-server).
//
// The peer drives a small JSON signaling protocol over the libp2p stream:
//   - "push-stream-room": selects (or creates) the target room, answered with
//     a "push-stream-ok" message carrying the room name.
//   - "ice-candidate": adds an ICE candidate to the incoming PeerConnection,
//     buffering candidates until the remote description has been applied.
//   - "offer": creates the PeerConnection, wires DataChannel and RTP track
//     forwarding into the room, and replies with an SDP "answer".
//
// The loop exits when the peer closes or resets the stream.
func (sp *StreamProtocol) handleStreamPush(stream network.Stream) {
	brw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
	safeBRW := common.NewSafeBufioRW(brw)

	// Target room of this push; set by the "push-stream-room" message.
	var room *shared.Room
	// ICE candidates received before the remote description is set are held here.
	iceHolder := make([]webrtc.ICECandidateInit, 0)

	for {
		data, err := safeBRW.Receive()
		if err != nil {
			if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) {
				slog.Debug("Stream push connection closed by peer", "peer", stream.Conn().RemotePeer())
				return
			}
			slog.Error("Failed to receive data for stream push", "err", err)
			_ = stream.Reset()
			return
		}

		var baseMsg connections.MessageBase
		if err = json.Unmarshal(data, &baseMsg); err != nil {
			// (fixed copy-paste log text: "from base message" -> "from data")
			slog.Error("Failed to unmarshal base message from data", "err", err)
			continue
		}

		switch baseMsg.Type {
		case "push-stream-room":
			var rawMsg connections.MessageRaw
			if err = json.Unmarshal(data, &rawMsg); err != nil {
				slog.Error("Failed to unmarshal room name from data", "err", err)
				continue
			}
			var roomName string
			if err = json.Unmarshal(rawMsg.Data, &roomName); err != nil {
				slog.Error("Failed to unmarshal room name from raw message", "err", err)
				continue
			}
			slog.Info("Received stream push request for room", "room", roomName)
			room = sp.relay.GetRoomByName(roomName)
			if room != nil {
				// Only the owning relay may accept a push, and only for offline rooms
				if room.OwnerID != sp.relay.ID {
					slog.Error("Cannot push a stream to non-owned room", "room", room.Name, "owner_id", room.OwnerID)
					continue
				}
				if room.IsOnline() {
					slog.Error("Cannot push a stream to already online room", "room", room.Name)
					continue
				}
			} else {
				// Create a new room if it doesn't exist
				room = sp.relay.CreateRoom(roomName)
			}
			// Respond with an OK with the room name
			roomData, err := json.Marshal(room.Name)
			if err != nil {
				slog.Error("Failed to marshal room name for push stream response", "err", err)
				continue
			}
			if err = safeBRW.SendJSON(connections.NewMessageRaw(
				"push-stream-ok",
				roomData,
			)); err != nil {
				slog.Error("Failed to send push stream OK response", "room", room.Name, "err", err)
				continue
			}
		case "ice-candidate":
			// Guard: a candidate arriving before "push-stream-room" previously
			// dereferenced a nil room below (room.Name).
			if room == nil {
				slog.Error("Received ICE candidate without room set for stream push")
				continue
			}
			var iceMsg connections.MessageICE
			if err = json.Unmarshal(data, &iceMsg); err != nil {
				slog.Error("Failed to unmarshal ICE candidate from data", "err", err)
				continue
			}
			if conn, ok := sp.incomingConns.Get(room.Name); ok && conn.pc.RemoteDescription() != nil {
				if err = conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
					slog.Error("Failed to add ICE candidate for pushed stream", "err", err)
				}
				// Flush candidates held while waiting for the remote description
				for _, heldIce := range iceHolder {
					if err := conn.pc.AddICECandidate(heldIce); err != nil {
						slog.Error("Failed to add held ICE candidate for pushed stream", "err", err)
					}
				}
				// Clear the held candidates
				iceHolder = make([]webrtc.ICECandidateInit, 0)
			} else {
				// Hold the candidate until remote description is set
				iceHolder = append(iceHolder, iceMsg.Candidate)
			}
		case "offer":
			// Make sure we have room set to push to (set by "push-stream-room")
			if room == nil {
				slog.Error("Received offer without room set for stream push")
				continue
			}
			var offerMsg connections.MessageSDP
			if err = json.Unmarshal(data, &offerMsg); err != nil {
				slog.Error("Failed to unmarshal offer from data", "err", err)
				continue
			}
			// Create PeerConnection for the incoming stream
			pc, err := common.CreatePeerConnection(func() {
				slog.Info("PeerConnection closed for pushed stream", "room", room.Name)
				// Cleanup the stream connection
				if ok := sp.incomingConns.Has(room.Name); ok {
					sp.incomingConns.Delete(room.Name)
				}
			})
			if err != nil {
				slog.Error("Failed to create PeerConnection for pushed stream", "room", room.Name, "err", err)
				continue
			}
			pc.OnDataChannel(func(dc *webrtc.DataChannel) {
				// TODO: Is this the best way to handle DataChannel? Should we just use the map directly?
				room.DataChannel = connections.NewNestriDataChannel(dc)
				room.DataChannel.RegisterOnOpen(func() {
					slog.Debug("DataChannel opened for pushed stream", "room", room.Name)
				})
				room.DataChannel.RegisterOnClose(func() {
					slog.Debug("DataChannel closed for pushed stream", "room", room.Name)
				})
				// Set the DataChannel in the incomingConns map
				if conn, ok := sp.incomingConns.Get(room.Name); ok {
					conn.ndc = room.DataChannel
				} else {
					sp.incomingConns.Set(room.Name, &StreamConnection{
						pc:  pc,
						ndc: room.DataChannel,
					})
				}
			})
			pc.OnICECandidate(func(candidate *webrtc.ICECandidate) {
				if candidate == nil {
					return
				}
				// Use a callback-local error: this callback fires asynchronously
				// and must not race with the receive loop's shared err variable.
				if sendErr := safeBRW.SendJSON(connections.NewMessageICE(
					"ice-candidate",
					candidate.ToJSON(),
				)); sendErr != nil {
					slog.Error("Failed to send ICE candidate message for pushed stream", "room", room.Name, "err", sendErr)
					return
				}
			})
			pc.OnTrack(func(remoteTrack *webrtc.TrackRemote, receiver *webrtc.RTPReceiver) {
				localTrack, err := webrtc.NewTrackLocalStaticRTP(remoteTrack.Codec().RTPCodecCapability, remoteTrack.Kind().String(), fmt.Sprintf("nestri-%s-%s", room.Name, remoteTrack.Kind().String()))
				if err != nil {
					slog.Error("Failed to create local track for pushed stream", "room", room.Name, "track_kind", remoteTrack.Kind().String(), "err", err)
					return
				}
				slog.Debug("Received track for pushed stream", "room", room.Name, "track_kind", remoteTrack.Kind().String())
				// Set track for Room
				room.SetTrack(remoteTrack.Kind(), localTrack)
				// Prepare PlayoutDelayExtension so we don't need to recreate it for each packet
				playoutExt := &rtp.PlayoutDelayExtension{
					MinDelay: 0,
					MaxDelay: 0,
				}
				playoutPayload, err := playoutExt.Marshal()
				if err != nil {
					slog.Error("Failed to marshal PlayoutDelayExtension for room", "room", room.Name, "err", err)
					return
				}
				// Forward RTP from the remote track into the room's local track
				for {
					rtpPacket, _, err := remoteTrack.ReadRTP()
					if err != nil {
						if !errors.Is(err, io.EOF) {
							slog.Error("Failed to read RTP from remote track for room", "room", room.Name, "err", err)
						}
						break
					}
					// Use PlayoutDelayExtension for low latency, if set for this track kind
					if extID, ok := common.GetExtension(remoteTrack.Kind(), common.ExtensionPlayoutDelay); ok {
						if err := rtpPacket.SetExtension(extID, playoutPayload); err != nil {
							slog.Error("Failed to set PlayoutDelayExtension for room", "room", room.Name, "err", err)
							continue
						}
					}
					err = localTrack.WriteRTP(rtpPacket)
					if err != nil && !errors.Is(err, io.ErrClosedPipe) {
						slog.Error("Failed to write RTP to local track for room", "room", room.Name, "err", err)
						break
					}
				}
				slog.Debug("Track closed for room", "room", room.Name, "track_kind", remoteTrack.Kind().String())
				// Cleanup the track from the room
				room.SetTrack(remoteTrack.Kind(), nil)
			})
			// Set the remote description
			if err = pc.SetRemoteDescription(offerMsg.SDP); err != nil {
				slog.Error("Failed to set remote description for pushed stream", "room", room.Name, "err", err)
				continue
			}
			slog.Debug("Set remote description for pushed stream", "room", room.Name)
			// Create an answer
			answer, err := pc.CreateAnswer(nil)
			if err != nil {
				slog.Error("Failed to create answer for pushed stream", "room", room.Name, "err", err)
				continue
			}
			if err = pc.SetLocalDescription(answer); err != nil {
				slog.Error("Failed to set local description for pushed stream", "room", room.Name, "err", err)
				continue
			}
			if err = safeBRW.SendJSON(connections.NewMessageSDP(
				"answer",
				answer,
			)); err != nil {
				slog.Error("Failed to send answer for pushed stream", "room", room.Name, "err", err)
			}
			// Store the connection
			sp.incomingConns.Set(room.Name, &StreamConnection{
				pc:  pc,
				ndc: room.DataChannel, // if it exists, if not it will be set later
			})
			slog.Debug("Sent answer for pushed stream", "room", room.Name)
		default:
			// Match the request handler's behavior: surface unexpected types
			slog.Warn("Unknown signaling message type for stream push", "type", baseMsg.Type)
		}
	}
}
// --- Public Usable Methods ---
// RequestStream sends a request to get room stream from another relay.
// It opens a fresh libp2p stream to the target peer using the stream-request
// protocol and hands it off to the internal request handler.
func (sp *StreamProtocol) RequestStream(ctx context.Context, room *shared.Room, peerID peer.ID) error {
	newStream, err := sp.relay.Host.NewStream(ctx, peerID, protocolStreamRequest)
	if err != nil {
		return fmt.Errorf("failed to create stream request: %w", err)
	}
	return sp.requestStream(newStream, room)
}

View File

@@ -0,0 +1,13 @@
package core
// ProtocolRegistry is a type holding all protocols to split away the bloat
type ProtocolRegistry struct {
	StreamProtocol *StreamProtocol // handler for the libp2p stream request/push signaling protocols
}
// NewProtocolRegistry initializes and returns a new protocol registry
// wired to the given relay.
func NewProtocolRegistry(relay *Relay) ProtocolRegistry {
	var registry ProtocolRegistry
	registry.StreamProtocol = NewStreamProtocol(relay)
	return registry
}

View File

@@ -0,0 +1,108 @@
package core
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"relay/internal/shared"
"github.com/libp2p/go-libp2p/core/network"
"github.com/oklog/ulid/v2"
)
// --- Room Management ---
// GetRoomByID retrieves a local Room struct by its ULID.
// Returns nil when no room with that ID is known locally.
func (r *Relay) GetRoomByID(id ulid.ULID) *shared.Room {
	room, found := r.LocalRooms.Get(id)
	if !found {
		return nil
	}
	return room
}
// GetRoomByName retrieves a local Room struct by its name.
// Returns nil when no local room carries that name.
func (r *Relay) GetRoomByName(name string) *shared.Room {
	var match *shared.Room
	for _, candidate := range r.LocalRooms.Copy() {
		if candidate.Name == name {
			match = candidate
			break
		}
	}
	return match
}
// CreateRoom creates a new local Room struct with the given name,
// registers it in the local room map under a fresh ULID, and returns it.
func (r *Relay) CreateRoom(name string) *shared.Room {
	newRoom := shared.NewRoom(name, ulid.Make(), r.ID)
	r.LocalRooms.Set(newRoom.ID, newRoom)
	slog.Debug("Created new local room", "room", name, "id", newRoom.ID)
	return newRoom
}
// DeleteRoomIfEmpty checks if a local room struct is inactive and can be removed.
// A room is removed when it has no participants and is still registered locally;
// its PeerConnection (if any) is closed as part of the cleanup.
func (r *Relay) DeleteRoomIfEmpty(room *shared.Room) {
	if room == nil {
		return
	}
	if room.Participants.Len() == 0 && r.LocalRooms.Has(room.ID) {
		slog.Debug("Deleting empty room without participants", "room", room.Name)
		r.LocalRooms.Delete(room.ID)
		// A room that never received a pushed stream has no PeerConnection;
		// closing nil would panic, so guard first.
		if room.PeerConnection != nil {
			if err := room.PeerConnection.Close(); err != nil {
				slog.Error("Failed to close Room PeerConnection", "room", room.Name, "err", err)
			}
		}
	}
}
// GetRemoteRoomByName returns room from mesh by name.
// Only rooms owned by other relays are considered, and only when the owning
// peer is still connected; stale owners are purged via onPeerDisconnected.
func (r *Relay) GetRemoteRoomByName(roomName string) *shared.RoomInfo {
	for _, info := range r.MeshRooms.Copy() {
		if info.Name != roomName || info.OwnerID == r.ID {
			continue
		}
		// Make sure connection is alive before handing the room out
		if r.Host.Network().Connectedness(info.OwnerID) == network.Connected {
			found := info
			return &found
		}
		slog.Debug("Removing stale peer, owns a room without connection", "room", roomName, "peer", info.OwnerID)
		r.onPeerDisconnected(info.OwnerID)
	}
	return nil
}
// --- State Publishing ---
// publishRoomStates publishes the state of all rooms currently owned by *this* relay.
// Publish failures are logged rather than returned (best-effort); only a
// marshaling failure produces a non-nil error.
func (r *Relay) publishRoomStates(ctx context.Context) error {
	if r.pubTopicState == nil {
		slog.Warn("Cannot publish room states: topic is nil")
		return nil
	}

	// Collect the rooms this relay owns.
	owned := make([]shared.RoomInfo, 0)
	r.LocalRooms.Range(func(id ulid.ULID, room *shared.Room) bool {
		if room.OwnerID == r.ID {
			owned = append(owned, shared.RoomInfo{
				ID:      room.ID,
				Name:    room.Name,
				OwnerID: r.ID,
			})
		}
		return true // keep iterating
	})
	if len(owned) == 0 {
		return nil
	}

	payload, err := json.Marshal(owned)
	if err != nil {
		return fmt.Errorf("failed to marshal local room states: %w", err)
	}
	if pubErr := r.pubTopicState.Publish(ctx, payload); pubErr != nil {
		slog.Error("Failed to publish room states message", "err", pubErr)
	}
	return nil
}

View File

@@ -0,0 +1,173 @@
package core
import (
"context"
"encoding/json"
"errors"
"log/slog"
"relay/internal/shared"
"time"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
)
// --- PubSub Message Handlers ---
// handleRoomStateMessages processes incoming room state updates from peers.
// Runs until ctx is cancelled or the subscription ends; malformed or
// self-originated messages are skipped.
func (r *Relay) handleRoomStateMessages(ctx context.Context, sub *pubsub.Subscription) {
	slog.Debug("Starting room state message handler...")
	for {
		select {
		case <-ctx.Done():
			slog.Info("Stopping room state message handler")
			return
		default:
		}

		msg, err := sub.Next(ctx)
		if err != nil {
			if errors.Is(err, context.Canceled) || errors.Is(err, pubsub.ErrSubscriptionCancelled) || errors.Is(err, context.DeadlineExceeded) {
				slog.Info("Room state subscription ended", "err", err)
				return
			}
			slog.Error("Error receiving room state message", "err", err)
			time.Sleep(1 * time.Second)
			continue
		}

		sender := msg.GetFrom()
		if sender == r.Host.ID() {
			continue // ignore our own broadcasts
		}

		var roomStates []shared.RoomInfo
		if unmarshalErr := json.Unmarshal(msg.Data, &roomStates); unmarshalErr != nil {
			slog.Error("Failed to unmarshal room states", "from", sender, "data_len", len(msg.Data), "err", unmarshalErr)
			continue
		}
		r.updateMeshRoomStates(sender, roomStates)
	}
}
// handleRelayMetricsMessages processes incoming status updates from peers.
// Runs until ctx is cancelled or the subscription ends; messages whose
// embedded relay ID doesn't match the pubsub sender are rejected.
func (r *Relay) handleRelayMetricsMessages(ctx context.Context, sub *pubsub.Subscription) {
	slog.Debug("Starting relay metrics message handler...")
	for {
		select {
		case <-ctx.Done():
			slog.Info("Stopping relay metrics message handler")
			return
		default:
		}

		msg, err := sub.Next(ctx)
		if err != nil {
			if errors.Is(err, context.Canceled) || errors.Is(err, pubsub.ErrSubscriptionCancelled) || errors.Is(err, context.DeadlineExceeded) {
				slog.Info("Relay metrics subscription ended", "err", err)
				return
			}
			slog.Error("Error receiving relay metrics message", "err", err)
			time.Sleep(1 * time.Second)
			continue
		}

		sender := msg.GetFrom()
		if sender == r.Host.ID() {
			continue // ignore our own broadcasts
		}

		var info RelayInfo
		if unmarshalErr := json.Unmarshal(msg.Data, &info); unmarshalErr != nil {
			slog.Error("Failed to unmarshal relay status", "from", sender, "data_len", len(msg.Data), "err", unmarshalErr)
			continue
		}
		// Reject spoofed/forwarded status claiming to be another peer
		if info.ID != sender {
			slog.Error("Peer ID mismatch in relay status", "expected", info.ID, "actual", sender)
			continue
		}
		r.onPeerStatus(info)
	}
}
// --- State Check Functions ---
// hasConnectedPeer checks if peer is in map and has a valid connection.
func (r *Relay) hasConnectedPeer(peerID peer.ID) bool {
	// Unknown peers are never considered connected.
	if _, known := r.LocalMeshPeers.Get(peerID); !known {
		return false
	}
	// Known peer: verify the libp2p network still reports a live connection.
	connected := r.Host.Network().Connectedness(peerID) == network.Connected
	if !connected {
		slog.Debug("Peer not connected", "peer", peerID)
	}
	return connected
}
// --- State Change Functions ---
// onPeerStatus updates the status of a peer based on received metrics, adding local perspective
func (r *Relay) onPeerStatus(recvInfo RelayInfo) {
	// recvInfo is a per-call copy, so taking its address is safe; each update
	// replaces the previous entry for this peer wholesale
	r.LocalMeshPeers.Set(recvInfo.ID, &recvInfo)
}
// onPeerConnected is called when a new peer connects to the relay.
// It registers the peer locally and kicks off an immediate, asynchronous
// state exchange (metrics first, then room states only if metrics succeed).
func (r *Relay) onPeerConnected(peerID peer.ID) {
	// Register peer with a bare info record; metrics fill it in later
	r.LocalMeshPeers.Set(peerID, &RelayInfo{ID: peerID})
	slog.Info("Peer connected", "peer", peerID)

	// Trigger immediate state exchange in the background
	go func() {
		if err := r.publishRelayMetrics(context.Background()); err != nil {
			slog.Error("Failed to publish relay metrics on connect", "err", err)
			return
		}
		if err := r.publishRoomStates(context.Background()); err != nil {
			slog.Error("Failed to publish room states on connect", "err", err)
		}
	}()
}
// onPeerDisconnected marks a peer as disconnected in our status view and
// removes its entry (including latency info) plus any mesh rooms it owned.
func (r *Relay) onPeerDisconnected(peerID peer.ID) {
	slog.Info("Mesh peer disconnected, deleting from local peer map", "peer", peerID)
	// Remove peer from local mesh peers. A single delete suffices: the same
	// map entry carried the latency info, so the previous duplicated
	// Has+Delete pair was redundant.
	r.LocalMeshPeers.Delete(peerID)
	// Remove any rooms owned by this peer. MeshRooms is keyed by room ULID
	// string (see updateMeshRoomStates), NOT by peer ID, so deleting by
	// peerID.String() never matched anything; scan for the owner instead.
	for roomID, info := range r.MeshRooms.Copy() {
		if info.OwnerID == peerID {
			r.MeshRooms.Delete(roomID)
		}
	}
	// TODO: If any rooms were routed through this peer, handle that case
}
// updateMeshRoomStates merges received room states into the MeshRooms map.
// When a remote room appears for the first time and we already hold local
// participants for a room with the same ID, a stream is requested from the owner.
// TODO: Wrap in another type with timestamp or another mechanism to avoid conflicts
func (r *Relay) updateMeshRoomStates(peerID peer.ID, states []shared.RoomInfo) {
	for _, state := range states {
		// Never track our own rooms as remote
		if state.OwnerID == r.ID {
			continue
		}
		key := state.ID.String()
		// First sighting of this remote room: request its stream if our
		// matching local room already has participants waiting
		if !r.MeshRooms.Has(key) {
			if localRoom, ok := r.LocalRooms.Get(state.ID); ok && localRoom.Participants.Len() > 0 {
				slog.Debug("Got new remote room state, we locally have participants for, requesting stream", "room_name", localRoom.Name, "peer", peerID)
				if err := r.StreamProtocol.RequestStream(context.Background(), localRoom, peerID); err != nil {
					slog.Error("Failed to request stream for new remote room state", "room_name", localRoom.Name, "peer", peerID, "err", err)
				}
			}
		}
		r.MeshRooms.Set(key, state)
	}
}