chore: initial commit for v0.0.1

DChain single-node blockchain + React Native messenger client.

Core:
- PBFT consensus with multi-sig validator admission + equivocation slashing
- BadgerDB + schema migration scaffold (CurrentSchemaVersion=0)
- libp2p gossipsub (tx/v1, blocks/v1, relay/v1, version/v1)
- Native Go contracts (username_registry) alongside WASM (wazero)
- WebSocket gateway with topic-based fanout + Ed25519-nonce auth
- Relay mailbox with NaCl envelope encryption (X25519 + Ed25519)
- Prometheus /metrics, per-IP rate limit, body-size cap

Deployment:
- Single-node compose (deploy/single/) with Caddy TLS + optional Prometheus
- 3-node dev compose (docker-compose.yml) with mocked internet topology
- 3-validator prod compose (deploy/prod/) for federation
- Auto-update from Gitea via /api/update-check + systemd timer
- Build-time version injection (ldflags → node --version)
- UI / Swagger toggle flags (DCHAIN_DISABLE_UI, DCHAIN_DISABLE_SWAGGER)

Client (client-app/):
- Expo / React Native / NativeWind
- E2E NaCl encryption, typing indicator, contact requests
- Auto-discovery of canonical contracts, chain_id aware, WS reconnect on node switch

Documentation:
- README.md, CHANGELOG.md, CONTEXT.md
- deploy/single/README.md with 6 operator scenarios
- deploy/UPDATE_STRATEGY.md with 4-layer forward-compat design
- docs/contracts/*.md per contract
Author: vsecoder
Date: 2026-04-17 14:16:44 +03:00
Commit: 7e7393e4f8

196 changed files with 55947 additions and 0 deletions

p2p/host.go (new file, +469 lines)
// Package p2p wraps go-libp2p with gossipsub and Kademlia DHT.
// The host uses the node's Ed25519 identity so the peer ID is deterministic
// across restarts.
package p2p
import (
"bufio"
"context"
"crypto/ed25519"
"encoding/json"
"fmt"
"log"
"time"
libp2p "github.com/libp2p/go-libp2p"
dht "github.com/libp2p/go-libp2p-kad-dht"
pubsub "github.com/libp2p/go-libp2p-pubsub"
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/discovery/mdns"
"github.com/libp2p/go-libp2p/p2p/discovery/routing"
discutil "github.com/libp2p/go-libp2p/p2p/discovery/util"
"github.com/multiformats/go-multiaddr"
"go-blockchain/blockchain"
"go-blockchain/identity"
)
const (
// Gossipsub topics (for non-consensus broadcast)
TopicTx = "dchain/tx/v1"
TopicBlocks = "dchain/blocks/v1" // committed block broadcast
// Direct stream protocols (for reliable small-N validator consensus)
ConsensusStreamProto = "/dchain/consensus/1.0.0"
DiscoveryNS = "dchain-v1"
mDNSServiceTag = "dchain-mdns"
)
// Host is a libp2p host with gossipsub topics and peer discovery.
// Consensus messages bypass gossipsub and travel over direct per-peer
// libp2p streams: dedicated streams give reliable point-to-point delivery
// to the small validator set, which is critical for PBFT (a validator must
// see PRE-PREPARE before acting on PREPARE from the same sender).
type Host struct {
h host.Host
dhtNode *dht.IpfsDHT
ps *pubsub.PubSub // exposed for relay and other topic consumers
// Gossipsub for block and tx propagation
txTopic *pubsub.Topic
blocksTopic *pubsub.Topic
txSub *pubsub.Subscription
blocksSub *pubsub.Subscription
// connHandlers is called when a new peer connects.
connHandlers []func(peer.ID)
// versionAnnouncer is the peer-version gossip subsystem, set by
// StartVersionGossip. nil until that's called (e.g. during tests).
versionAnnouncer *versionAnnouncer
}
// NewHost creates a libp2p host.
// The Ed25519 identity key is used so the peer ID is stable across restarts.
//
// announceAddrs, if non-nil, replaces the addresses advertised to peers.
// Use this when the node runs on a server with a public IP that differs from
// the listen interface (VPS, Docker, NAT), e.g.:
//
// []multiaddr.Multiaddr{multiaddr.StringCast("/ip4/1.2.3.4/tcp/4001")}
//
// Without announceAddrs the host tries UPnP/NAT-PMP (libp2p.NATPortMap).
// On a direct-IP VPS or in Docker with a fixed backbone IP, pass the address
// explicitly — otherwise peers will receive unreachable internal addresses.
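//
// Minimal call sketch (illustrative addresses; error handling elided):
//
//	pub := multiaddr.StringCast("/ip4/203.0.113.7/tcp/4001")
//	h, _ := NewHost(ctx, id, "/ip4/0.0.0.0/tcp/4001", []multiaddr.Multiaddr{pub})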
func NewHost(ctx context.Context, id *identity.Identity, listenAddr string, announceAddrs []multiaddr.Multiaddr) (*Host, error) {
ma, err := multiaddr.NewMultiaddr(listenAddr)
if err != nil {
return nil, fmt.Errorf("bad listen addr: %w", err)
}
// Convert stdlib Ed25519 key → libp2p crypto.PrivKey
privStd := ed25519.PrivateKey(id.PrivKey)
lk, _, err := libp2pcrypto.KeyPairFromStdKey(&privStd)
if err != nil {
return nil, fmt.Errorf("convert identity key: %w", err)
}
opts := []libp2p.Option{
libp2p.ListenAddrs(ma),
libp2p.Identity(lk),
libp2p.NATPortMap(),
}
// Override advertised addresses when explicit announce addrs are provided.
// Required for internet deployment: without this libp2p advertises the
// bind interface (0.0.0.0 → internal/loopback) which remote peers cannot reach.
if len(announceAddrs) > 0 {
announce := announceAddrs
opts = append(opts, libp2p.AddrsFactory(func(_ []multiaddr.Multiaddr) []multiaddr.Multiaddr {
return announce
}))
}
h, err := libp2p.New(opts...)
if err != nil {
return nil, fmt.Errorf("create libp2p host: %w", err)
}
// Kademlia DHT for peer discovery.
// dht.BootstrapPeers() with no args disables the default public IPFS nodes:
// this is a private chain, we don't want to gossip with the global IPFS
// network. Peer discovery happens via our own --peers bootstrap nodes.
kadDHT, err := dht.New(ctx, h,
dht.Mode(dht.ModeAutoServer),
dht.BootstrapPeers(), // empty — private network only
)
if err != nil {
h.Close()
return nil, fmt.Errorf("create dht: %w", err)
}
if err := kadDHT.Bootstrap(ctx); err != nil {
h.Close()
return nil, fmt.Errorf("dht bootstrap: %w", err)
}
// GossipSub — only for blocks and transactions (not consensus)
ps, err := pubsub.NewGossipSub(ctx, h)
if err != nil {
h.Close()
return nil, fmt.Errorf("create gossipsub: %w", err)
}
txTopic, err := ps.Join(TopicTx)
if err != nil {
h.Close()
return nil, fmt.Errorf("join tx topic: %w", err)
}
blocksTopic, err := ps.Join(TopicBlocks)
if err != nil {
h.Close()
return nil, fmt.Errorf("join blocks topic: %w", err)
}
txSub, err := txTopic.Subscribe()
if err != nil {
h.Close()
return nil, fmt.Errorf("subscribe tx topic: %w", err)
}
blocksSub, err := blocksTopic.Subscribe()
if err != nil {
h.Close()
return nil, fmt.Errorf("subscribe blocks topic: %w", err)
}
node := &Host{
h: h,
dhtNode: kadDHT,
ps: ps,
txTopic: txTopic,
blocksTopic: blocksTopic,
txSub: txSub,
blocksSub: blocksSub,
}
// mDNS — automatic discovery on the same LAN / Docker bridge network
mdnsSvc := mdns.NewMdnsService(h, mDNSServiceTag, &mdnsNotifee{node: node})
if err := mdnsSvc.Start(); err != nil {
log.Printf("[P2P] mDNS start error (non-fatal): %v", err)
}
// Notify connHandlers when a new peer connects
h.Network().Notify(&network.NotifyBundle{
ConnectedF: func(_ network.Network, c network.Conn) {
go func() {
for _, fn := range node.connHandlers {
fn(c.RemotePeer())
}
}()
},
})
log.Printf("[P2P] node started id=%s", h.ID())
for _, addr := range h.Addrs() {
log.Printf("[P2P] %s/p2p/%s", addr, h.ID())
}
return node, nil
}
// PeerID returns this node's libp2p peer ID string.
func (n *Host) PeerID() string {
return n.h.ID().String()
}
// OnPeerConnected registers a callback called when a new peer connects.
func (n *Host) OnPeerConnected(fn func(peer.ID)) {
n.connHandlers = append(n.connHandlers, fn)
}
// Advertise announces this node under DiscoveryNS in the DHT.
func (n *Host) Advertise(ctx context.Context) {
rd := routing.NewRoutingDiscovery(n.dhtNode)
discutil.Advertise(ctx, rd, DiscoveryNS)
}
// DiscoverPeers continuously searches the DHT for new peers.
// Runs a persistent loop: after each FindPeers round it waits 60 s and
// tries again, so the node reconnects after network partitions or restarts.
func (n *Host) DiscoverPeers(ctx context.Context) {
rd := routing.NewRoutingDiscovery(n.dhtNode)
go func() {
for {
select {
case <-ctx.Done():
return
default:
}
ch, err := rd.FindPeers(ctx, DiscoveryNS)
if err != nil {
select {
case <-ctx.Done():
return
case <-time.After(30 * time.Second):
}
continue
}
for p := range ch {
if p.ID == n.h.ID() {
continue
}
if n.h.Network().Connectedness(p.ID) == network.NotConnected {
if err := n.h.Connect(ctx, p); err == nil {
log.Printf("[P2P] DHT discovered %s", p.ID)
}
}
}
// Wait before the next discovery round.
select {
case <-ctx.Done():
return
case <-time.After(60 * time.Second):
}
}
}()
}
// Connect dials a peer by full multiaddr (must include /p2p/<peerID>).
func (n *Host) Connect(ctx context.Context, addrStr string) error {
ma, err := multiaddr.NewMultiaddr(addrStr)
if err != nil {
return err
}
pi, err := peer.AddrInfoFromP2pAddr(ma)
if err != nil {
return err
}
return n.h.Connect(ctx, *pi)
}
// SetConsensusMsgHandler registers the direct-stream handler for consensus messages.
// Messages from each connected peer are decoded and passed to handler.
func (n *Host) SetConsensusMsgHandler(handler func(*blockchain.ConsensusMsg)) {
n.h.SetStreamHandler(ConsensusStreamProto, func(s network.Stream) {
defer s.Close()
if err := s.SetDeadline(time.Now().Add(10 * time.Second)); err != nil {
log.Printf("[P2P] consensus stream deadline error: %v", err)
}
scanner := bufio.NewScanner(s)
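// bufio.Scanner's default token limit is 64 KiB; raise it to 1 MiB so
// larger consensus payloads aren't rejected mid-stream.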
scanner.Buffer(make([]byte, 1<<20), 1<<20)
for scanner.Scan() {
var msg blockchain.ConsensusMsg
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
log.Printf("[P2P] bad consensus msg: %v", err)
continue
}
handler(&msg)
}
})
}
// BroadcastConsensus sends a ConsensusMsg directly to all connected peers.
// Uses dedicated streams — reliable for small validator sets.
func (n *Host) BroadcastConsensus(msg *blockchain.ConsensusMsg) error {
data, err := json.Marshal(msg)
if err != nil {
return err
}
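// Newline-delimited framing: one JSON message per line, matching the
// bufio.Scanner reader in SetConsensusMsgHandler on the receiving side.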
data = append(data, '\n')
peers := n.h.Network().Peers()
for _, pid := range peers {
pid := pid
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s, err := n.h.NewStream(ctx, pid, ConsensusStreamProto)
if err != nil {
return // peer may not support this protocol yet
}
defer s.Close()
if err := s.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
log.Printf("[P2P] consensus write deadline to %s: %v", pid, err)
return
}
if _, err := s.Write(data); err != nil {
log.Printf("[P2P] consensus write to %s: %v", pid, err)
}
}()
}
return nil
}
// PublishTx broadcasts a Transaction.
func (n *Host) PublishTx(tx *blockchain.Transaction) error {
data, err := json.Marshal(tx)
if err != nil {
return err
}
return n.txTopic.Publish(context.Background(), data)
}
// PublishBlock broadcasts a committed block so peers can sync.
func (n *Host) PublishBlock(b *blockchain.Block) error {
data, err := json.Marshal(b)
if err != nil {
return err
}
return n.blocksTopic.Publish(context.Background(), data)
}
// TxMsgs returns a channel of incoming Transactions from peers.
func (n *Host) TxMsgs(ctx context.Context) <-chan *blockchain.Transaction {
ch := make(chan *blockchain.Transaction, 64)
go func() {
defer close(ch)
for {
m, err := n.txSub.Next(ctx)
if err != nil {
return
}
if m.ReceivedFrom == n.h.ID() {
continue
}
var tx blockchain.Transaction
if err := json.Unmarshal(m.Data, &tx); err != nil {
continue
}
select {
case ch <- &tx:
case <-ctx.Done():
return
}
}
}()
return ch
}
// BlockMsg is a gossip-received block along with the peer that forwarded it
// to us. Used by the main node loop so gap-fill can ask the gossiper for the
// missing blocks between tip and the received one.
type BlockMsg struct {
Block *blockchain.Block
From peer.ID
}
// BlockMsgs returns a channel of committed blocks broadcast by peers.
// The channel item includes the forwarding peer ID so callers can drive
// gap-fill sync from whichever peer just proved it has the new tip.
func (n *Host) BlockMsgs(ctx context.Context) <-chan BlockMsg {
ch := make(chan BlockMsg, 64)
go func() {
defer close(ch)
for {
m, err := n.blocksSub.Next(ctx)
if err != nil {
return
}
if m.ReceivedFrom == n.h.ID() {
continue
}
var b blockchain.Block
if err := json.Unmarshal(m.Data, &b); err != nil {
continue
}
select {
case ch <- BlockMsg{Block: &b, From: m.ReceivedFrom}:
case <-ctx.Done():
return
}
}
}()
return ch
}
// PeerCount returns number of connected peers.
func (n *Host) PeerCount() int {
return len(n.h.Network().Peers())
}
// Peers returns all connected peer IDs.
func (n *Host) Peers() []peer.ID {
return n.h.Network().Peers()
}
// LibP2PHost exposes the underlying host for the sync protocol.
func (n *Host) LibP2PHost() host.Host {
return n.h
}
// GossipSub returns the underlying PubSub instance so callers can join
// additional topics (e.g. the relay envelope topic).
func (n *Host) GossipSub() *pubsub.PubSub {
return n.ps
}
// AddrStrings returns all full multiaddrs for this host.
func (n *Host) AddrStrings() []string {
var out []string
for _, a := range n.h.Addrs() {
out = append(out, fmt.Sprintf("%s/p2p/%s", a, n.h.ID()))
}
return out
}
// ConnectedPeerInfo describes one currently-connected remote peer.
// Used by the /api/peers endpoint so new joiners can download a live seed
// list from any existing node and bootstrap their libp2p connectivity.
type ConnectedPeerInfo struct {
ID string `json:"id"`
Addrs []string `json:"addrs"`
}
// ConnectedPeers returns every peer in the network's current view with their
// full libp2p multiaddrs (suffixed with /p2p/<id>). Addresses come from the
// peerstore, which includes both dialed and received connections.
//
// Safe to call concurrently while the host is running; does not hold any
// lock beyond libp2p's internal peerstore lock.
func (n *Host) ConnectedPeers() []ConnectedPeerInfo {
peers := n.h.Network().Peers()
out := make([]ConnectedPeerInfo, 0, len(peers))
for _, pid := range peers {
addrs := n.h.Peerstore().Addrs(pid)
addrStrs := make([]string, 0, len(addrs))
for _, a := range addrs {
addrStrs = append(addrStrs, fmt.Sprintf("%s/p2p/%s", a, pid))
}
out = append(out, ConnectedPeerInfo{
ID: pid.String(),
Addrs: addrStrs,
})
}
return out
}
// Close shuts down the host.
func (n *Host) Close() error {
return n.h.Close()
}
// --- mDNS notifee ---
type mdnsNotifee struct{ node *Host }
func (m *mdnsNotifee) HandlePeerFound(pi peer.AddrInfo) {
if pi.ID == m.node.h.ID() {
return
}
log.Printf("[P2P] mDNS found peer %s — connecting", pi.ID)
if err := m.node.h.Connect(context.Background(), pi); err != nil {
log.Printf("[P2P] mDNS connect to %s failed: %v", pi.ID, err)
}
}

p2p/sync.go (new file, +168 lines)
// Package p2p — chain sync protocol.
//
// Sync protocol "/dchain/sync/1.0.0":
//
// Request → {"from": N, "to": M}
// Response → newline-delimited JSON blocks (index N … M), then EOF
//
// Height protocol "/dchain/height/1.0.0":
//
// Request → (empty)
// Response → {"height": N}
package p2p
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"log"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"go-blockchain/blockchain"
)
const (
SyncProtocol = "/dchain/sync/1.0.0"
HeightProtocol = "/dchain/height/1.0.0"
syncTimeout = 30 * time.Second
)
type syncRequest struct {
From uint64 `json:"from"`
To uint64 `json:"to"`
}
type heightResponse struct {
Height uint64 `json:"height"`
}
// SetSyncHandler registers the block-sync stream handler.
// getBlock must be safe to call concurrently.
func (n *Host) SetSyncHandler(
getBlock func(index uint64) (*blockchain.Block, error),
getHeight func() uint64,
) {
n.h.SetStreamHandler(SyncProtocol, func(s network.Stream) {
defer s.Close()
if err := s.SetDeadline(time.Now().Add(syncTimeout)); err != nil {
log.Printf("[SYNC] set deadline error: %v", err)
return
}
var req syncRequest
if err := json.NewDecoder(s).Decode(&req); err != nil {
return
}
log.Printf("[SYNC] serving blocks %d%d to %s", req.From, req.To, s.Conn().RemotePeer())
enc := json.NewEncoder(s)
for i := req.From; i <= req.To; i++ {
b, err := getBlock(i)
if err != nil {
break // peer asks for a block we don't have yet
}
if err := enc.Encode(b); err != nil {
return
}
}
})
n.h.SetStreamHandler(HeightProtocol, func(s network.Stream) {
defer s.Close()
if err := s.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
log.Printf("[SYNC] set height deadline error: %v", err)
return
}
resp := heightResponse{Height: getHeight()}
if err := json.NewEncoder(s).Encode(resp); err != nil {
log.Printf("[SYNC] encode height response error: %v", err)
}
})
}
// QueryPeerHeight returns the chain height of a connected peer.
func (n *Host) QueryPeerHeight(ctx context.Context, peerID peer.ID) (uint64, error) {
s, err := n.h.NewStream(ctx, peerID, HeightProtocol)
if err != nil {
return 0, fmt.Errorf("open height stream to %s: %w", peerID, err)
}
defer s.Close()
if err := s.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
return 0, fmt.Errorf("set height deadline: %w", err)
}
var resp heightResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
return 0, fmt.Errorf("decode height: %w", err)
}
return resp.Height, nil
}
// SyncBlocks fetches blocks [from, to] from a peer and returns them in order.
func (n *Host) SyncBlocks(ctx context.Context, peerID peer.ID, from, to uint64) ([]*blockchain.Block, error) {
s, err := n.h.NewStream(ctx, peerID, SyncProtocol)
if err != nil {
return nil, fmt.Errorf("open sync stream to %s: %w", peerID, err)
}
defer s.Close()
if err := s.SetDeadline(time.Now().Add(syncTimeout)); err != nil {
return nil, fmt.Errorf("set sync deadline: %w", err)
}
req := syncRequest{From: from, To: to}
if err := json.NewEncoder(s).Encode(req); err != nil {
return nil, fmt.Errorf("send sync req: %w", err)
}
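// Half-close the stream: the responder's JSON decoder sees EOF on the
// request while our read side stays open for the block stream.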
if err := s.CloseWrite(); err != nil {
return nil, fmt.Errorf("close sync write: %w", err)
}
var blocks []*blockchain.Block
scanner := bufio.NewScanner(io.LimitReader(s, 100<<20)) // 100 MiB max
scanner.Buffer(make([]byte, 1<<20), 1<<20)
for scanner.Scan() {
var b blockchain.Block
if err := json.Unmarshal(scanner.Bytes(), &b); err != nil {
return nil, fmt.Errorf("decode block: %w", err)
}
blocks = append(blocks, &b)
}
return blocks, scanner.Err()
}
// SyncFromPeerFull syncs all blocks that the peer has but we don't.
// localCount = number of blocks we already have (0 if empty, N if we have blocks 0..N-1).
// The peer reports its own block count; we fetch [localCount .. peerCount-1].
// Each block is passed to applyFn in ascending index order.
// Returns the number of blocks synced.
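//
// Example: localCount=5 (we hold blocks 0..4), peerCount=8 → fetch
// blocks [5..7], apply all three in order, return 3.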
func (n *Host) SyncFromPeerFull(ctx context.Context, peerID peer.ID, localCount uint64, applyFn func(*blockchain.Block) error) (int, error) {
peerCount, err := n.QueryPeerHeight(ctx, peerID)
if err != nil {
return 0, fmt.Errorf("query height: %w", err)
}
if peerCount <= localCount {
return 0, nil // already up to date
}
from := localCount // first missing block index
to := peerCount - 1 // last block index peer has
log.Printf("[SYNC] syncing blocks %d%d from peer %s", from, to, peerID)
blocks, err := n.SyncBlocks(ctx, peerID, from, to)
if err != nil {
return 0, err
}
for _, b := range blocks {
if err := applyFn(b); err != nil {
return 0, fmt.Errorf("apply block #%d: %w", b.Index, err)
}
}
return len(blocks), nil
}

p2p/version_gossip.go (new file, +220 lines)
// Package p2p — peer version discovery via gossipsub.
//
// What this solves
// ────────────────
// A decentralized node fleet has no registry telling each operator what
// version everyone else is running. Without that knowledge:
//
// • We can't decide when it's safe to activate a new feature-flag tx
// (§5.2 of UPDATE_STRATEGY.md) — activation must wait until ≥N% of
// the network has the new binary.
// • Operators can't see at a glance "am I the one holding back an
// upgrade?" — because their node's Explorer had no way to ask peers.
// • Clients can't warn the user "this node is running a pre-channels
// build" without making N extra HTTP round-trips.
//
// How it works
// ────────────
// A small gossipsub topic — `dchain/version/v1` — carries a JSON blob from
// each node:
//
// {
// "peer_id": "12D3KooW…",
// "tag": "v0.5.1",
// "commit": "abc1234…",
// "protocol_version": 1,
// "timestamp": 1715000000
// }
//
// Every node:
// 1. Publishes its own blob every 60 seconds.
// 2. Subscribes to the topic and keeps a bounded in-memory map
// peer.ID → latest announce.
// 3. Evicts entries older than 15 minutes (peer disconnect / stale).
//
// Messages are unsigned and advisory — a peer lying about its version is
// detectable when their blocks/txs use unsupported fields (consensus will
// reject), so we don't add a signature layer here. The map is pure UX.
//
// Memory budget: ~200 bytes per entry, bounded by the connected-peer count.
// Topic traffic: ~300 bytes per peer every 60 s — trivial for a libp2p fleet.
package p2p
import (
"context"
"encoding/json"
"log"
"sync"
"time"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"go-blockchain/node/version"
)
// TopicVersion is the gossipsub topic for peer-version announces.
const TopicVersion = "dchain/version/v1"
const (
versionGossipInterval = 60 * time.Second
versionGossipTTL = 15 * time.Minute
)
// PeerVersion is one peer's self-reported identity.
type PeerVersion struct {
PeerID string `json:"peer_id"`
Tag string `json:"tag"`
Commit string `json:"commit"`
ProtocolVersion int `json:"protocol_version"`
Timestamp int64 `json:"timestamp"`
ReceivedAt time.Time `json:"received_at,omitempty"`
}
// versionAnnouncer is wired into Host via StartVersionGossip. It holds the
// publish topic, the subscription, and the latest-seen map behind its own
// RWMutex, so reads (PeerVersions) never contend with topic publishes —
// publishOnce doesn't touch the map at all.
type versionAnnouncer struct {
h *Host
topic *pubsub.Topic
sub *pubsub.Subscription
protoVer int
mu sync.RWMutex
latest map[peer.ID]PeerVersion
}
// StartVersionGossip joins the version topic, spawns the publisher loop and
// the subscriber loop, and returns. Both goroutines run until ctx is done.
//
// Call exactly once per Host. protocolVersion should be node.ProtocolVersion
// (the compile-time wire-protocol const) — threaded through as an int to
// avoid an import cycle (p2p → node would be circular; node → p2p already
// exists via the host injection).
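//
// Typical wiring at node startup (sketch; assumes the node package's
// ProtocolVersion const mentioned above):
//
//	if err := host.StartVersionGossip(ctx, node.ProtocolVersion); err != nil {
//	    log.Fatalf("version gossip: %v", err)
//	}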
func (n *Host) StartVersionGossip(ctx context.Context, protocolVersion int) error {
topic, err := n.ps.Join(TopicVersion)
if err != nil {
return err
}
sub, err := topic.Subscribe()
if err != nil {
return err
}
va := &versionAnnouncer{
h: n,
topic: topic,
sub: sub,
protoVer: protocolVersion,
latest: make(map[peer.ID]PeerVersion),
}
n.versionAnnouncer = va
go va.publishLoop(ctx)
go va.subscribeLoop(ctx)
go va.evictLoop(ctx)
return nil
}
// PeerVersions returns a snapshot of every peer's last-known version.
// Result is a copy — caller can iterate without a lock.
func (n *Host) PeerVersions() map[string]PeerVersion {
if n.versionAnnouncer == nil {
return nil
}
va := n.versionAnnouncer
va.mu.RLock()
defer va.mu.RUnlock()
out := make(map[string]PeerVersion, len(va.latest))
for pid, v := range va.latest {
out[pid.String()] = v
}
return out
}
func (va *versionAnnouncer) publishLoop(ctx context.Context) {
// First publish immediately so peers who just joined learn our version
// without a minute of lag.
va.publishOnce(ctx)
t := time.NewTicker(versionGossipInterval)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
va.publishOnce(ctx)
}
}
}
func (va *versionAnnouncer) publishOnce(ctx context.Context) {
msg := PeerVersion{
PeerID: va.h.h.ID().String(),
Tag: version.Tag,
Commit: version.Commit,
ProtocolVersion: va.protoVer,
Timestamp: time.Now().Unix(),
}
b, err := json.Marshal(msg)
if err != nil {
log.Printf("[P2P] version gossip marshal: %v", err)
return
}
pubCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
if err := va.topic.Publish(pubCtx, b); err != nil {
log.Printf("[P2P] version gossip publish: %v", err)
}
}
func (va *versionAnnouncer) subscribeLoop(ctx context.Context) {
for {
m, err := va.sub.Next(ctx)
if err != nil {
if ctx.Err() != nil {
return
}
log.Printf("[P2P] version gossip recv: %v", err)
continue
}
// Skip our own broadcasts — gossipsub delivers them back to us by
// default. Without this we'd overwrite our own "received" timestamp
// every minute and clutter metrics.
if m.ReceivedFrom == va.h.h.ID() {
continue
}
var pv PeerVersion
if err := json.Unmarshal(m.Data, &pv); err != nil {
log.Printf("[P2P] version gossip bad msg from %s: %v", m.ReceivedFrom, err)
continue
}
// Source validation: the peer ID inside the message must match the
// peer that sent it. Otherwise a node could spoof "version" rows
// for peers it doesn't control, confusing the UX.
if pv.PeerID != m.ReceivedFrom.String() {
continue
}
pv.ReceivedAt = time.Now()
va.mu.Lock()
va.latest[m.ReceivedFrom] = pv
va.mu.Unlock()
}
}
func (va *versionAnnouncer) evictLoop(ctx context.Context) {
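// Sweep at TTL/3 (every 5 min): a stale entry lingers at most ~TTL plus
// one sweep interval (~20 min) after its last announce.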
t := time.NewTicker(versionGossipTTL / 3)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case now := <-t.C:
cutoff := now.Add(-versionGossipTTL)
va.mu.Lock()
for pid, v := range va.latest {
if v.ReceivedAt.Before(cutoff) {
delete(va.latest, pid)
}
}
va.mu.Unlock()
}
}
}