mirror of https://github.com/nestriness/nestri.git
synced 2025-12-12 08:45:38 +02:00
feat: Controller support, performance enhancements, multi-stage images, fixes (#304)
## Description

Oops.. another massive PR 🥲 This PR contains multiple improvements and changes:

1. Thanks to gst-wayland-display's PR [here](https://github.com/games-on-whales/gst-wayland-display/pull/20), the NVIDIA path is now far more efficient than before.
2. Adding controller support was a massive hurdle, requiring me to start another project, [vimputti](https://github.com/DatCaptainHorse/vimputti), which allows simple virtual controller inputs in isolated containers. Well, it's not simple internally (it includes LD_PRELOAD shims and other craziness), but the library API is simple to use..
3. Split the runner image into 3 separate stages (base + build + runtime), which should help keep things in check in the future; also added GitHub Actions CI builds for the v2 to v4 variants (hopefully they pass..).
4. Replaced the runner's runtime Steam patching with a better and simpler bubblewrap patch; massive thanks to `games-on-whales` for figuring it out!
5. The relay, for once, needed some changes. They are still mostly WIP, but I'll deal with them next time I have energy.. I'm spent now. They had to be included because the relay needed a minor change to allow rumble events to flow back to the client peer.
6. Tons of package updates, minor code improvements and the usual.

## Summary by CodeRabbit

* **New Features**
  * End-to-end gamepad/controller support (attach/detach, buttons, sticks, triggers, rumble) with client/server integration and virtual controller plumbing.
  * Optional Prometheus metrics endpoint and WebTransport support.
  * Background vimputti manager process added for controller handling.
* **Improvements**
  * Multi-variant container image builds and streamlined runtime images.
  * Zero-copy video pipeline and encoder improvements for lower latency.
  * Updated Steam compat mapping and dependency/toolchain refreshes.
* **Bug Fixes**
  * More robust GPU detection, input/fullscreen lifecycle, startup/entrypoint, and container runtime fixes.

---------

Co-authored-by: DatCaptainHorse <DatCaptainHorse@users.noreply.github.com>
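As context for the metrics item in the summary above: the relay's new optional endpoint is the stock `promhttp` pattern, visible in the `NewRelay` hunk below. A minimal standalone sketch (the `:2112` address here is illustrative; the relay itself takes the port from `common.GetFlags().MetricsPort`):

```go
package main

import (
	"log/slog"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Expose the default Prometheus registry on the same path the relay uses.
	http.Handle("/debug/metrics/prometheus", promhttp.Handler())
	slog.Info("metrics server listening", "addr", ":2112")
	if err := http.ListenAndServe(":2112", nil); err != nil {
		slog.Error("metrics server failed", "err", err)
	}
}
```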
committed by GitHub
parent a3ee9aadd9
commit c62a22b552
@@ -4,24 +4,31 @@ import (
	"context"
	"crypto/ed25519"
	"fmt"
+	"log"
	"log/slog"
+	"net/http"
	"os"
	"relay/internal/common"
	"relay/internal/shared"
	"time"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
+	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
	"github.com/libp2p/go-libp2p/p2p/security/noise"
+	"github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
	"github.com/libp2p/go-libp2p/p2p/transport/tcp"
	ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+	webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
	"github.com/multiformats/go-multiaddr"
	"github.com/oklog/ulid/v2"
	"github.com/pion/webrtc/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// -- Variables --
@@ -30,17 +37,9 @@ var globalRelay *Relay

// -- Structs --

-// RelayInfo contains light information of Relay, in mesh-friendly format
-type RelayInfo struct {
-	ID            peer.ID
-	MeshAddrs     []string                                 // Addresses of this relay
-	MeshRooms     *common.SafeMap[string, shared.RoomInfo] // Rooms hosted by this relay
-	MeshLatencies *common.SafeMap[string, time.Duration]   // Latencies to other peers from this relay
-}
-
// Relay structure enhanced with metrics and state
type Relay struct {
-	RelayInfo
+	*PeerInfo

	Host   host.Host      // libp2p host for peer-to-peer networking
	PubSub *pubsub.PubSub // PubSub for state synchronization
@@ -48,7 +47,6 @@ type Relay struct {

	// Local
	LocalRooms           *common.SafeMap[ulid.ULID, *shared.Room]         // room ID -> local Room struct (hosted by this relay)
-	LocalMeshPeers       *common.SafeMap[peer.ID, *RelayInfo]             // peer ID -> mesh peer relay info (connected to this relay)
	LocalMeshConnections *common.SafeMap[peer.ID, *webrtc.PeerConnection] // peer ID -> PeerConnection (connected to this relay)

	// Protocols
@@ -60,11 +58,43 @@ type Relay struct {
}

func NewRelay(ctx context.Context, port int, identityKey crypto.PrivKey) (*Relay, error) {
+	// If metrics are enabled, start the metrics server first
+	metricsOpts := make([]libp2p.Option, 0)
+	var rmgr network.ResourceManager
+	if common.GetFlags().Metrics {
+		go func() {
+			slog.Info("Starting prometheus metrics server at '/debug/metrics/prometheus'", "port", common.GetFlags().MetricsPort)
+			http.Handle("/debug/metrics/prometheus", promhttp.Handler())
+			if err := http.ListenAndServe(fmt.Sprintf(":%d", common.GetFlags().MetricsPort), nil); err != nil {
+				slog.Error("Failed to start metrics server", "err", err)
+			}
+		}()
+
+		rcmgr.MustRegisterWith(prometheus.DefaultRegisterer)
+
+		str, err := rcmgr.NewStatsTraceReporter()
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		rmgr, err = rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()), rcmgr.WithTraceReporter(str))
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		metricsOpts = append(metricsOpts, libp2p.ResourceManager(rmgr))
+		metricsOpts = append(metricsOpts, libp2p.PrometheusRegisterer(prometheus.DefaultRegisterer))
+	} else {
+		rmgr = nil
+	}
+
	listenAddrs := []string{
-		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port),    // IPv4 - Raw TCP
-		fmt.Sprintf("/ip6/::/tcp/%d", port),         // IPv6 - Raw TCP
-		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/ws", port), // IPv4 - TCP WebSocket
-		fmt.Sprintf("/ip6/::/tcp/%d/ws", port),      // IPv6 - TCP WebSocket
+		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port),                      // IPv4 - Raw TCP
+		fmt.Sprintf("/ip6/::/tcp/%d", port),                           // IPv6 - Raw TCP
+		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/ws", port),                   // IPv4 - TCP WebSocket
+		fmt.Sprintf("/ip6/::/tcp/%d/ws", port),                        // IPv6 - TCP WebSocket
+		fmt.Sprintf("/ip4/0.0.0.0/udp/%d/quic-v1/webtransport", port), // IPv4 - UDP QUIC WebTransport
+		fmt.Sprintf("/ip6/::/udp/%d/quic-v1/webtransport", port),      // IPv6 - UDP QUIC WebTransport
	}

	var muAddrs []multiaddr.Multiaddr
@@ -78,11 +108,12 @@ func NewRelay(ctx context.Context, port int, identityKey crypto.PrivKey) (*Relay

	// Initialize libp2p host
	p2pHost, err := libp2p.New(
-		// TODO: Currently static identity
+		libp2p.ChainOptions(metricsOpts...),
		libp2p.Identity(identityKey),
		// Enable required transports
		libp2p.Transport(tcp.NewTCPTransport),
		libp2p.Transport(ws.New),
+		libp2p.Transport(webtransport.New),
		// Other options
		libp2p.ListenAddrs(muAddrs...),
		libp2p.Security(noise.ID, noise.New),
@@ -91,6 +122,7 @@ func NewRelay(ctx context.Context, port int, identityKey crypto.PrivKey) (*Relay
		libp2p.EnableNATService(),
		libp2p.EnableAutoNATv2(),
		libp2p.ShareTCPListener(),
+		libp2p.QUICReuse(quicreuse.NewConnManager),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create libp2p host for relay: %w", err)
@@ -105,23 +137,13 @@ func NewRelay(ctx context.Context, port int, identityKey crypto.PrivKey) (*Relay
	// Initialize Ping Service
	pingSvc := ping.NewPingService(p2pHost)

-	var addresses []string
-	for _, addr := range p2pHost.Addrs() {
-		addresses = append(addresses, addr.String())
-	}
-
	r := &Relay{
-		RelayInfo: RelayInfo{
-			ID:            p2pHost.ID(),
-			MeshAddrs:     addresses,
-			MeshRooms:     common.NewSafeMap[string, shared.RoomInfo](),
-			MeshLatencies: common.NewSafeMap[string, time.Duration](),
-		},
-		Host:           p2pHost,
-		PubSub:         p2pPubsub,
-		PingService:    pingSvc,
-		LocalRooms:     common.NewSafeMap[ulid.ULID, *shared.Room](),
-		LocalMeshPeers: common.NewSafeMap[peer.ID, *RelayInfo](),
+		PeerInfo:             NewPeerInfo(p2pHost.ID(), p2pHost.Addrs()),
+		Host:                 p2pHost,
+		PubSub:               p2pPubsub,
+		PingService:          pingSvc,
+		LocalRooms:           common.NewSafeMap[ulid.ULID, *shared.Room](),
+		LocalMeshConnections: common.NewSafeMap[peer.ID, *webrtc.PeerConnection](),
	}

	// Add network notifier after relay is initialized
@@ -152,7 +174,7 @@ func NewRelay(ctx context.Context, port int, identityKey crypto.PrivKey) (*Relay
	return r, nil
}

-func InitRelay(ctx context.Context, ctxCancel context.CancelFunc) error {
+func InitRelay(ctx context.Context, ctxCancel context.CancelFunc) (*Relay, error) {
	var err error
	persistentDir := common.GetFlags().PersistDir
@@ -164,7 +186,7 @@ func InitRelay(ctx context.Context, ctxCancel context.CancelFunc) error {
	if hasIdentity {
		_, err = os.Stat(persistentDir + "/identity.key")
		if err != nil && !os.IsNotExist(err) {
-			return fmt.Errorf("failed to check identity key file: %w", err)
+			return nil, fmt.Errorf("failed to check identity key file: %w", err)
		} else if os.IsNotExist(err) {
			hasIdentity = false
		}
@@ -172,17 +194,17 @@ func InitRelay(ctx context.Context, ctxCancel context.CancelFunc) error {
	if !hasIdentity {
		// Make sure the persistent directory exists
		if err = os.MkdirAll(persistentDir, 0700); err != nil {
-			return fmt.Errorf("failed to create persistent data directory: %w", err)
+			return nil, fmt.Errorf("failed to create persistent data directory: %w", err)
		}
		// Generate
		slog.Info("Generating new identity for relay")
		privKey, err = common.GenerateED25519Key()
		if err != nil {
-			return fmt.Errorf("failed to generate new identity: %w", err)
+			return nil, fmt.Errorf("failed to generate new identity: %w", err)
		}
		// Save the key
		if err = common.SaveED25519Key(privKey, persistentDir+"/identity.key"); err != nil {
-			return fmt.Errorf("failed to save identity key: %w", err)
+			return nil, fmt.Errorf("failed to save identity key: %w", err)
		}
		slog.Info("New identity generated and saved", "path", persistentDir+"/identity.key")
	} else {
@@ -190,25 +212,45 @@ func InitRelay(ctx context.Context, ctxCancel context.CancelFunc) error {
		// Load the key
		privKey, err = common.LoadED25519Key(persistentDir + "/identity.key")
		if err != nil {
-			return fmt.Errorf("failed to load identity key: %w", err)
+			return nil, fmt.Errorf("failed to load identity key: %w", err)
		}
	}

	// Convert to libp2p crypto.PrivKey
	identityKey, err = crypto.UnmarshalEd25519PrivateKey(privKey)
	if err != nil {
-		return fmt.Errorf("failed to unmarshal ED25519 private key: %w", err)
+		return nil, fmt.Errorf("failed to unmarshal ED25519 private key: %w", err)
	}

	globalRelay, err = NewRelay(ctx, common.GetFlags().EndpointPort, identityKey)
	if err != nil {
-		return fmt.Errorf("failed to create relay: %w", err)
+		return nil, fmt.Errorf("failed to create relay: %w", err)
	}

	if err = common.InitWebRTCAPI(); err != nil {
-		return err
+		return nil, err
	}

	slog.Info("Relay initialized", "id", globalRelay.ID)
-	return nil
+
+	// Load previous peers on startup
+	defaultFile := common.GetFlags().PersistDir + "/peerstore.json"
+	if err = globalRelay.LoadFromFile(defaultFile); err != nil {
+		slog.Warn("Failed to load previous peer store", "error", err)
+	} else {
+		globalRelay.Peers.Range(func(id peer.ID, pi *PeerInfo) bool {
+			if len(pi.Addrs) <= 0 {
+				slog.Warn("Peer from peer store has no addresses", "peer", id)
+				return true
+			}
+
+			// Connect to first address only
+			if err = globalRelay.ConnectToPeer(context.Background(), pi.Addrs[0]); err != nil {
+				slog.Error("Failed to connect to peer from peer store", "peer", id, "error", err)
+			}
+			return true
+		})
+	}
+
+	return globalRelay, nil
}
@@ -19,7 +19,7 @@ type discoveryNotifee struct {

func (d *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
	if d.relay != nil {
-		if err := d.relay.connectToRelay(context.Background(), &pi); err != nil {
+		if err := d.relay.connectToPeer(context.Background(), &pi); err != nil {
			slog.Error("failed to connect to discovered relay", "peer", pi.ID, "error", err)
		}
	}
@@ -46,7 +46,7 @@ func (r *Relay) publishRelayMetrics(ctx context.Context) error {
	// Check all peer latencies
	r.checkAllPeerLatencies(ctx)

-	data, err := json.Marshal(r.RelayInfo)
+	data, err := json.Marshal(r.PeerInfo)
	if err != nil {
		return fmt.Errorf("failed to marshal relay status: %w", err)
	}
@@ -109,8 +109,8 @@ func (r *Relay) measureLatencyToPeer(ctx context.Context, peerID peer.ID) {
	if result.Error != nil {
		slog.Warn("Latency check failed, removing peer from local peers map", "peer", peerID, "err", result.Error)
		// Remove from MeshPeers if ping failed
-		if r.LocalMeshPeers.Has(peerID) {
-			r.LocalMeshPeers.Delete(peerID)
+		if r.Peers.Has(peerID) {
+			r.Peers.Delete(peerID)
		}
		return
	}
@@ -123,6 +123,6 @@ func (r *Relay) measureLatencyToPeer(ctx context.Context, peerID peer.ID) {
			latency = 1 * time.Microsecond
		}

-		r.RelayInfo.MeshLatencies.Set(peerID.String(), latency)
+		r.PeerInfo.Latencies.Set(peerID, latency)
	}
}
@@ -22,7 +22,7 @@ type networkNotifier struct {

// Connected is called when a connection is established
func (n *networkNotifier) Connected(net network.Network, conn network.Conn) {
-	if n.relay == nil {
+	if n.relay != nil {
		n.relay.onPeerConnected(conn.RemotePeer())
	}
}
@@ -75,8 +75,8 @@ func (r *Relay) setupPubSub(ctx context.Context) error {

// --- Connection Management ---

-// connectToRelay is internal method to connect to a relay peer using multiaddresses
-func (r *Relay) connectToRelay(ctx context.Context, peerInfo *peer.AddrInfo) error {
+// connectToPeer is internal method to connect to a peer using multiaddresses
+func (r *Relay) connectToPeer(ctx context.Context, peerInfo *peer.AddrInfo) error {
	if peerInfo.ID == r.ID {
		return errors.New("cannot connect to self")
	}
@@ -94,19 +94,14 @@ func (r *Relay) connectToRelay(ctx context.Context, peerInfo *peer.AddrInfo) err
	return nil
}

-// ConnectToRelay connects to another relay by its multiaddress.
-func (r *Relay) ConnectToRelay(ctx context.Context, addr string) error {
-	ma, err := multiaddr.NewMultiaddr(addr)
-	if err != nil {
-		return fmt.Errorf("invalid multiaddress: %w", err)
-	}
-
-	peerInfo, err := peer.AddrInfoFromP2pAddr(ma)
+// ConnectToPeer connects to another peer by its multiaddress.
+func (r *Relay) ConnectToPeer(ctx context.Context, addr multiaddr.Multiaddr) error {
+	peerInfo, err := peer.AddrInfoFromP2pAddr(addr)
	if err != nil {
		return fmt.Errorf("failed to extract peer info: %w", err)
	}

-	return r.connectToRelay(ctx, peerInfo)
+	return r.connectToPeer(ctx, peerInfo)
}

// printConnectInstructions logs the multiaddresses for connecting to this relay.
packages/relay/internal/core/peer.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package core

import (
	"errors"
	"log/slog"
	"os"
	"relay/internal/common"
	"relay/internal/shared"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)

// PeerInfo contains information of a peer, in light transmit-friendly format
type PeerInfo struct {
	ID        peer.ID
	Addrs     []multiaddr.Multiaddr                    // Addresses of this peer
	Peers     *common.SafeMap[peer.ID, *PeerInfo]      // Peers connected to this peer
	Latencies *common.SafeMap[peer.ID, time.Duration]  // Latencies to other peers from this peer
	Rooms     *common.SafeMap[string, shared.RoomInfo] // Rooms this peer is part of or owner of
}

func NewPeerInfo(id peer.ID, addrs []multiaddr.Multiaddr) *PeerInfo {
	return &PeerInfo{
		ID:        id,
		Addrs:     addrs,
		Peers:     common.NewSafeMap[peer.ID, *PeerInfo](),
		Latencies: common.NewSafeMap[peer.ID, time.Duration](),
		Rooms:     common.NewSafeMap[string, shared.RoomInfo](),
	}
}

// SaveToFile saves the peer store to a JSON file in persistent path
func (pi *PeerInfo) SaveToFile(filePath string) error {
	if len(filePath) <= 0 {
		return errors.New("filepath is not set")
	}

	// Marshal the peer store to JSON array (we don't need to store IDs..)
	data, err := pi.Peers.MarshalJSON()
	if err != nil {
		return errors.New("failed to marshal peer store data: " + err.Error())
	}

	// Save the data to a file
	if err = os.WriteFile(filePath, data, 0644); err != nil {
		return errors.New("failed to save peer store to file: " + err.Error())
	}

	slog.Info("PeerStore saved to file", "path", filePath)
	return nil
}

// LoadFromFile loads the peer store from a JSON file in persistent path
func (pi *PeerInfo) LoadFromFile(filePath string) error {
	if len(filePath) <= 0 {
		return errors.New("filepath is not set")
	}

	data, err := os.ReadFile(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			slog.Info("PeerStore file does not exist, starting with empty store")
			return nil // No peers to load
		}
		return errors.New("failed to read peer store file: " + err.Error())
	}

	// Unmarshal the JSON data into the peer store
	if err = pi.Peers.UnmarshalJSON(data); err != nil {
		return errors.New("failed to unmarshal peer store data: " + err.Error())
	}

	slog.Info("PeerStore loaded from file", "path", filePath)
	return nil
}
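The `PeerInfo` store above is what the reworked `InitRelay` wires up at startup. A rough usage sketch inside the same `core` package; `host`, `persistDir`, `relay`, and `ctx` stand in for values the relay already has in scope:

```go
// Sketch: persist the peer store, then restore and redial on startup,
// mirroring what InitRelay does with peerstore.json.
pi := NewPeerInfo(host.ID(), host.Addrs())

if err := pi.SaveToFile(persistDir + "/peerstore.json"); err != nil {
	slog.Error("failed to persist peer store", "err", err)
}

if err := pi.LoadFromFile(persistDir + "/peerstore.json"); err == nil {
	pi.Peers.Range(func(id peer.ID, p *PeerInfo) bool {
		if len(p.Addrs) > 0 {
			// Connect to the first known address only, as InitRelay does.
			_ = relay.ConnectToPeer(ctx, p.Addrs[0])
		}
		return true
	})
}
```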
@@ -40,15 +40,15 @@ type StreamConnection struct {
// StreamProtocol deals with meshed stream forwarding
type StreamProtocol struct {
	relay          *Relay
-	servedConns    *common.SafeMap[peer.ID, *StreamConnection] // peer ID -> StreamConnection (for served streams)
-	incomingConns  *common.SafeMap[string, *StreamConnection]  // room name -> StreamConnection (for incoming pushed streams)
-	requestedConns *common.SafeMap[string, *StreamConnection]  // room name -> StreamConnection (for requested streams from other relays)
+	servedConns    *common.SafeMap[string, *common.SafeMap[peer.ID, *StreamConnection]] // room name -> (peer ID -> StreamConnection) (for served streams)
+	incomingConns  *common.SafeMap[string, *StreamConnection]                           // room name -> StreamConnection (for incoming pushed streams)
+	requestedConns *common.SafeMap[string, *StreamConnection]                           // room name -> StreamConnection (for requested streams from other relays)
}

func NewStreamProtocol(relay *Relay) *StreamProtocol {
	protocol := &StreamProtocol{
		relay:          relay,
-		servedConns:    common.NewSafeMap[peer.ID, *StreamConnection](),
+		servedConns:    common.NewSafeMap[string, *common.SafeMap[peer.ID, *StreamConnection]](),
		incomingConns:  common.NewSafeMap[string, *StreamConnection](),
		requestedConns: common.NewSafeMap[string, *StreamConnection](),
	}
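The re-keying above (`peer ID -> StreamConnection` becomes `room name -> (peer ID -> StreamConnection)`) is what lets input and rumble traffic be fanned out per room later in this diff. A self-contained sketch of the get-or-create plus fan-out pattern, using a simplified stand-in for `relay/internal/common`'s `SafeMap`:

```go
package main

import (
	"fmt"
	"sync"
)

// SafeMap is a minimal stand-in for relay/internal/common.SafeMap.
type SafeMap[K comparable, V any] struct {
	mu sync.RWMutex
	m  map[K]V
}

func NewSafeMap[K comparable, V any]() *SafeMap[K, V] {
	return &SafeMap[K, V]{m: make(map[K]V)}
}

func (s *SafeMap[K, V]) Get(k K) (V, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	v, ok := s.m[k]
	return v, ok
}

func (s *SafeMap[K, V]) Set(k K, v V) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m[k] = v
}

func (s *SafeMap[K, V]) Range(f func(K, V) bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for k, v := range s.m {
		if !f(k, v) {
			return
		}
	}
}

func main() {
	// room name -> (peer ID -> connection), the same shape as the new servedConns.
	served := NewSafeMap[string, *SafeMap[string, string]]()

	// Get-or-create the per-room map before registering a viewer.
	roomMap, ok := served.Get("room-a")
	if !ok {
		roomMap = NewSafeMap[string, string]()
		served.Set("room-a", roomMap)
	}
	roomMap.Set("peer-1", "conn-1")

	// Fan a message out to the viewers of one room only.
	if viewers, ok := served.Get("room-a"); ok {
		viewers.Range(func(peerID, conn string) bool {
			fmt.Println("forward to", peerID, "via", conn)
			return true // continue iteration
		})
	}
}
```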
@@ -66,6 +66,7 @@ func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
	brw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
	safeBRW := common.NewSafeBufioRW(brw)

+	var currentRoomName string // Track the current room for this stream
	iceHolder := make([]webrtc.ICECandidateInit, 0)
	for {
		data, err := safeBRW.Receive()
@@ -101,7 +102,9 @@ func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
				continue
			}

+			currentRoomName = roomName // Store the room name
			slog.Info("Received stream request for room", "room", roomName)
+
			room := sp.relay.GetRoomByName(roomName)
			if room == nil || !room.IsOnline() || room.OwnerID != sp.relay.ID {
				// TODO: Allow forward requests to other relays from here?
@@ -126,8 +129,12 @@ func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
			pc, err := common.CreatePeerConnection(func() {
				slog.Info("PeerConnection closed for requested stream", "room", roomName)
				// Cleanup the stream connection
-				if ok := sp.servedConns.Has(stream.Conn().RemotePeer()); ok {
-					sp.servedConns.Delete(stream.Conn().RemotePeer())
+				if roomMap, ok := sp.servedConns.Get(roomName); ok {
+					roomMap.Delete(stream.Conn().RemotePeer())
+					// If the room map is empty, delete it
+					if roomMap.Len() == 0 {
+						sp.servedConns.Delete(roomName)
+					}
				}
			})
			if err != nil {
@@ -204,7 +211,12 @@ func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
			}

			// Store the connection
-			sp.servedConns.Set(stream.Conn().RemotePeer(), &StreamConnection{
+			roomMap, ok := sp.servedConns.Get(roomName)
+			if !ok {
+				roomMap = common.NewSafeMap[peer.ID, *StreamConnection]()
+				sp.servedConns.Set(roomName, roomMap)
+			}
+			roomMap.Set(stream.Conn().RemotePeer(), &StreamConnection{
				pc:  pc,
				ndc: ndc,
			})
@@ -216,17 +228,25 @@ func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
				slog.Error("Failed to unmarshal ICE message", "err", err)
				continue
			}
-			if conn, ok := sp.servedConns.Get(stream.Conn().RemotePeer()); ok && conn.pc.RemoteDescription() != nil {
-				if err := conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
-					slog.Error("Failed to add ICE candidate", "err", err)
-				}
-				for _, heldIce := range iceHolder {
-					if err := conn.pc.AddICECandidate(heldIce); err != nil {
-						slog.Error("Failed to add held ICE candidate", "err", err)
-					}
-				}
-				// Clear the held candidates
-				iceHolder = make([]webrtc.ICECandidateInit, 0)
-			} else {
-				// Hold the candidate until remote description is set
-				iceHolder = append(iceHolder, iceMsg.Candidate)
-			}
+			// Use currentRoomName to get the connection from nested map
+			if len(currentRoomName) > 0 {
+				if roomMap, ok := sp.servedConns.Get(currentRoomName); ok {
+					if conn, ok := roomMap.Get(stream.Conn().RemotePeer()); ok && conn.pc.RemoteDescription() != nil {
+						if err := conn.pc.AddICECandidate(iceMsg.Candidate); err != nil {
+							slog.Error("Failed to add ICE candidate", "err", err)
+						}
+						for _, heldIce := range iceHolder {
+							if err := conn.pc.AddICECandidate(heldIce); err != nil {
+								slog.Error("Failed to add held ICE candidate", "err", err)
+							}
+						}
+						// Clear the held candidates
+						iceHolder = make([]webrtc.ICECandidateInit, 0)
+					} else {
+						// Hold the candidate until remote description is set
+						iceHolder = append(iceHolder, iceMsg.Candidate)
+					}
+				}
+			}
@@ -237,12 +257,19 @@ func (sp *StreamProtocol) handleStreamRequest(stream network.Stream) {
				slog.Error("Failed to unmarshal answer from signaling message", "err", err)
				continue
			}
-			if conn, ok := sp.servedConns.Get(stream.Conn().RemotePeer()); ok {
-				if err := conn.pc.SetRemoteDescription(answerMsg.SDP); err != nil {
-					slog.Error("Failed to set remote description for answer", "err", err)
-					continue
-				}
-				slog.Debug("Set remote description for answer")
-			} else {
-				slog.Warn("Received answer without active PeerConnection")
-			}
+			// Use currentRoomName to get the connection from nested map
+			if len(currentRoomName) > 0 {
+				if roomMap, ok := sp.servedConns.Get(currentRoomName); ok {
+					if conn, ok := roomMap.Get(stream.Conn().RemotePeer()); ok {
+						if err := conn.pc.SetRemoteDescription(answerMsg.SDP); err != nil {
+							slog.Error("Failed to set remote description for answer", "err", err)
+							continue
+						}
+						slog.Debug("Set remote description for answer")
+					} else {
+						slog.Warn("Received answer without active PeerConnection")
+					}
+				}
+			}
@@ -452,7 +479,7 @@ func (sp *StreamProtocol) handleStreamPush(stream network.Stream) {
		data, err := safeBRW.Receive()
		if err != nil {
			if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) {
-				slog.Debug("Stream push connection closed by peer", "peer", stream.Conn().RemotePeer())
+				slog.Debug("Stream push connection closed by peer", "peer", stream.Conn().RemotePeer(), "error", err)
				return
			}
@@ -568,6 +595,21 @@ func (sp *StreamProtocol) handleStreamPush(stream network.Stream) {
	room.DataChannel.RegisterOnClose(func() {
		slog.Debug("DataChannel closed for pushed stream", "room", room.Name)
	})
+	room.DataChannel.RegisterMessageCallback("input", func(data []byte) {
+		if room.DataChannel != nil {
+			// Pass to servedConns DataChannels for this specific room
+			if roomMap, ok := sp.servedConns.Get(room.Name); ok {
+				roomMap.Range(func(peerID peer.ID, conn *StreamConnection) bool {
+					if conn.ndc != nil {
+						if err = conn.ndc.SendBinary(data); err != nil {
+							slog.Error("Failed to forward input message from pushed stream to viewer", "room", room.Name, "peer", peerID, "err", err)
+						}
+					}
+					return true // Continue iteration
+				})
+			}
+		}
+	})

	// Set the DataChannel in the incomingConns map
	if conn, ok := sp.incomingConns.Get(room.Name); ok {
@@ -687,7 +729,7 @@ func (sp *StreamProtocol) handleStreamPush(stream network.Stream) {
func (sp *StreamProtocol) RequestStream(ctx context.Context, room *shared.Room, peerID peer.ID) error {
	stream, err := sp.relay.Host.NewStream(ctx, peerID, protocolStreamRequest)
	if err != nil {
-		return fmt.Errorf("failed to create stream request: %w", err)
+		return fmt.Errorf("failed to create stream: %w", err)
	}

	return sp.requestStream(stream, room)
@@ -57,15 +57,15 @@ func (r *Relay) DeleteRoomIfEmpty(room *shared.Room) {

// GetRemoteRoomByName returns room from mesh by name
func (r *Relay) GetRemoteRoomByName(roomName string) *shared.RoomInfo {
-	for _, room := range r.MeshRooms.Copy() {
+	for _, room := range r.Rooms.Copy() {
		if room.Name == roomName && room.OwnerID != r.ID {
			// Make sure connection is alive
			if r.Host.Network().Connectedness(room.OwnerID) == network.Connected {
				return &room
-			} else {
-				slog.Debug("Removing stale peer, owns a room without connection", "room", roomName, "peer", room.OwnerID)
-				r.onPeerDisconnected(room.OwnerID)
			}
+
+			slog.Debug("Removing stale peer, owns a room without connection", "room", roomName, "peer", room.OwnerID)
+			r.onPeerDisconnected(room.OwnerID)
		}
	}
	return nil
@@ -72,8 +72,8 @@ func (r *Relay) handleRelayMetricsMessages(ctx context.Context, sub *pubsub.Subs
			continue
		}

-		var info RelayInfo
-		if err := json.Unmarshal(msg.Data, &info); err != nil {
+		var info PeerInfo
+		if err = json.Unmarshal(msg.Data, &info); err != nil {
			slog.Error("Failed to unmarshal relay status", "from", msg.GetFrom(), "data_len", len(msg.Data), "err", err)
			continue
		}
@@ -89,7 +89,7 @@ func (r *Relay) handleRelayMetricsMessages(ctx context.Context, sub *pubsub.Subs
// --- State Check Functions ---
// hasConnectedPeer checks if peer is in map and has a valid connection
func (r *Relay) hasConnectedPeer(peerID peer.ID) bool {
-	if _, ok := r.LocalMeshPeers.Get(peerID); !ok {
+	if _, ok := r.Peers.Get(peerID); !ok {
		return false
	}
	if r.Host.Network().Connectedness(peerID) != network.Connected {
@@ -102,14 +102,14 @@ func (r *Relay) hasConnectedPeer(peerID peer.ID) bool {
// --- State Change Functions ---

// onPeerStatus updates the status of a peer based on received metrics, adding local perspective
-func (r *Relay) onPeerStatus(recvInfo RelayInfo) {
-	r.LocalMeshPeers.Set(recvInfo.ID, &recvInfo)
+func (r *Relay) onPeerStatus(recvInfo PeerInfo) {
+	r.Peers.Set(recvInfo.ID, &recvInfo)
}

// onPeerConnected is called when a new peer connects to the relay
func (r *Relay) onPeerConnected(peerID peer.ID) {
	// Add to local peer map
-	r.LocalMeshPeers.Set(peerID, &RelayInfo{
+	r.Peers.Set(peerID, &PeerInfo{
		ID: peerID,
	})
@@ -131,16 +131,12 @@ func (r *Relay) onPeerConnected(peerID peer.ID) {
func (r *Relay) onPeerDisconnected(peerID peer.ID) {
	slog.Info("Mesh peer disconnected, deleting from local peer map", "peer", peerID)
	// Remove peer from local mesh peers
-	if r.LocalMeshPeers.Has(peerID) {
-		r.LocalMeshPeers.Delete(peerID)
+	if r.Peers.Has(peerID) {
+		r.Peers.Delete(peerID)
	}
	// Remove any rooms associated with this peer
-	if r.MeshRooms.Has(peerID.String()) {
-		r.MeshRooms.Delete(peerID.String())
-	}
-	// Remove any latencies associated with this peer
-	if r.LocalMeshPeers.Has(peerID) {
-		r.LocalMeshPeers.Delete(peerID)
+	if r.Rooms.Has(peerID.String()) {
+		r.Rooms.Delete(peerID.String())
	}

	// TODO: If any rooms were routed through this peer, handle that case
@@ -155,7 +151,7 @@ func (r *Relay) updateMeshRoomStates(peerID peer.ID, states []shared.RoomInfo) {
		}

		// If previously did not exist, but does now, request a connection if participants exist for our room
-		existed := r.MeshRooms.Has(state.ID.String())
+		existed := r.Rooms.Has(state.ID.String())
		if !existed {
			// Request connection to this peer if we have participants in our local room
			if room, ok := r.LocalRooms.Get(state.ID); ok {
@@ -168,6 +164,6 @@ func (r *Relay) updateMeshRoomStates(peerID peer.ID, states []shared.RoomInfo) {
			}
		}

-		r.MeshRooms.Set(state.ID.String(), state)
+		r.Rooms.Set(state.ID.String(), state)
	}
}