chore: initial commit for v0.0.1

DChain single-node blockchain + React Native messenger client.

Core:
- PBFT consensus with multi-sig validator admission + equivocation slashing
- BadgerDB + schema migration scaffold (CurrentSchemaVersion=0)
- libp2p gossipsub (tx/v1, blocks/v1, relay/v1, version/v1)
- Native Go contracts (username_registry) alongside WASM (wazero)
- WebSocket gateway with topic-based fanout + Ed25519-nonce auth
- Relay mailbox with NaCl envelope encryption (X25519 + Ed25519)
- Prometheus /metrics, per-IP rate limit, body-size cap

Deployment:
- Single-node compose (deploy/single/) with Caddy TLS + optional Prometheus
- 3-node dev compose (docker-compose.yml) with mocked internet topology
- 3-validator prod compose (deploy/prod/) for federation
- Auto-update from Gitea via /api/update-check + systemd timer
- Build-time version injection (ldflags → node --version)
- UI / Swagger toggle flags (DCHAIN_DISABLE_UI, DCHAIN_DISABLE_SWAGGER)

Client (client-app/):
- Expo / React Native / NativeWind
- E2E NaCl encryption, typing indicator, contact requests
- Auto-discovery of canonical contracts, chain_id aware, WS reconnect on node switch

Documentation:
- README.md, CHANGELOG.md, CONTEXT.md
- deploy/single/README.md with 6 operator scenarios
- deploy/UPDATE_STRATEGY.md with 4-layer forward-compat design
- docs/contracts/*.md per contract
This commit is contained in:
vsecoder
2026-04-17 14:16:44 +03:00
commit 7e7393e4f8
196 changed files with 55947 additions and 0 deletions

883
consensus/pbft.go Normal file
View File

@@ -0,0 +1,883 @@
// Package consensus implements PBFT (Practical Byzantine Fault Tolerance)
// in the Tendermint style: Pre-prepare → Prepare → Commit.
//
// Safety: block committed only after 2f+1 COMMIT votes (f = max faulty nodes)
// Liveness: view-change if no commit within blockTimeout
package consensus
import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"log"
	"sync"
	"time"

	"go-blockchain/blockchain"
	"go-blockchain/identity"
)
const (
	phaseNone    = 0               // idle: no proposal in flight for the current seqNum
	phasePrepare = 1               // received PRE-PREPARE, broadcast PREPARE
	phaseCommit  = 2               // have 2f+1 PREPARE, broadcast COMMIT
	blockTimeout = 2 * time.Second // liveness deadline before the timer fires (see resetTimer)
)
// CommitCallback is called when a block reaches 2f+1 COMMIT votes.
// It is invoked on a fresh goroutine, outside the engine lock.
type CommitCallback func(block *blockchain.Block)
// Engine is a single-node PBFT consensus engine.
//
// Locking: round state (phase, proposal, votes, view, seqNum) is guarded by
// mu; the mempool (pendingMu), liveness map (livenessMu) and evidence state
// (evidenceMu) each have their own finer-grained lock so that tx ingestion
// and metrics polling do not contend with the hot consensus-message path.
type Engine struct {
	mu sync.Mutex

	id         *identity.Identity
	validators []string // sorted hex pub keys of all validators
	view       uint64   // current PBFT view (increments on view-change)
	seqNum     uint64   // index of the next block we expect to propose/commit

	// in-flight round state
	phase        int
	proposal     *blockchain.Block
	prepareVotes map[string]bool
	commitVotes  map[string]bool

	onCommit CommitCallback

	// send broadcasts a ConsensusMsg to all peers (via P2P layer).
	send func(msg *blockchain.ConsensusMsg)

	// liveness tracks the last seqNum we saw a commit vote from each
	// validator. Used by LivenessReport to surface stale peers in /metrics
	// and logs. In-memory only — restarting the node resets counters,
	// which is fine because auto-removal requires multiple nodes to
	// independently agree anyway.
	livenessMu sync.RWMutex
	liveness   map[string]uint64 // pubkey → last seqNum

	// seenVotes records the first PREPARE/COMMIT we saw from each validator
	// for each (type, view, seqNum). If a second message arrives with a
	// different BlockHash, it's equivocation evidence — we stash both so
	// an operator (or future auto-slasher) can submit a SLASH tx.
	// Bounded implicitly: entries are pruned as we advance past the seqNum.
	evidenceMu      sync.Mutex
	seenVotes       map[voteKey]*blockchain.ConsensusMsg
	pendingEvidence []blockchain.EquivocationEvidence

	// Mempool is fair-queued per sender: each address has its own FIFO
	// queue and Propose drains them round-robin. Without this, one
	// spammer's txs go in first and can starve everyone else for the
	// duration of the flood.
	//
	// senderQueues[from] — per-address FIFO of uncommitted txs
	// senderOrder — iteration order for round-robin draining
	// seenIDs — O(1) dedup set across all queues
	pendingMu    sync.Mutex
	senderQueues map[string][]*blockchain.Transaction
	senderOrder  []string
	seenIDs      map[string]struct{}

	timer *time.Timer // round timeout; armed by resetTimer, stopped on commit

	// optional stats hooks — called outside the lock (on fresh goroutines).
	// NOTE(review): these fields are written by OnPropose/OnVote/OnViewChange
	// without holding mu — register hooks before message processing starts.
	hookPropose    func()
	hookVote       func()
	hookViewChange func()
}
// OnPropose registers a hook called each time this node proposes a block.
// NOTE(review): the hook field is written without holding e.mu — register
// hooks before the engine starts handling messages.
func (e *Engine) OnPropose(fn func()) { e.hookPropose = fn }

// OnVote registers a hook called each time this node casts a PREPARE or
// COMMIT vote. Same registration caveat as OnPropose.
func (e *Engine) OnVote(fn func()) { e.hookVote = fn }

// OnViewChange registers a hook called each time a view-change is triggered.
// Same registration caveat as OnPropose.
func (e *Engine) OnViewChange(fn func()) { e.hookViewChange = fn }
// NewEngine constructs a PBFT engine ready to participate in consensus.
//   - id: this node's signing identity
//   - validators: complete validator set (including this node)
//   - seqNum: tip.Index + 1 (or 0 if chain is empty)
//   - onCommit: invoked once a block gathers a commit quorum
//   - send: broadcast function (gossipsub publish)
func NewEngine(
	id *identity.Identity,
	validators []string,
	seqNum uint64,
	onCommit CommitCallback,
	send func(*blockchain.ConsensusMsg),
) *Engine {
	e := &Engine{
		id:         id,
		validators: validators,
		seqNum:     seqNum,
		onCommit:   onCommit,
		send:       send,
	}
	// Lookup structures start empty but non-nil so handlers never nil-check.
	e.senderQueues = make(map[string][]*blockchain.Transaction)
	e.seenIDs = make(map[string]struct{})
	e.liveness = make(map[string]uint64)
	e.seenVotes = make(map[voteKey]*blockchain.ConsensusMsg)
	return e
}
// recordVote stores the first PREPARE/COMMIT seen from each validator at
// each (type, view, seqNum) position and checks whether a later message
// from the same validator conflicts with it. On conflict, the pair is
// pushed onto pendingEvidence for later retrieval via TakeEvidence.
//
// Only PREPARE and COMMIT are checked. PRE-PREPARE equivocation can happen
// legitimately during view changes, so we don't flag it.
//
// Fix: a replayed copy of the same conflicting vote (common with gossip
// fan-out) no longer appends a second, identical evidence entry —
// previously pendingEvidence grew by one per replay until TakeEvidence
// drained it.
func (e *Engine) recordVote(msg *blockchain.ConsensusMsg) {
	if msg.Type != blockchain.MsgPrepare && msg.Type != blockchain.MsgCommit {
		return
	}
	k := voteKey{from: msg.From, typ: msg.Type, view: msg.View, seqNum: msg.SeqNum}
	e.evidenceMu.Lock()
	defer e.evidenceMu.Unlock()
	prev, seen := e.seenVotes[k]
	if !seen {
		// First message at this position from this validator — record.
		msgCopy := *msg
		e.seenVotes[k] = &msgCopy
		return
	}
	if bytesEqualBlockHash(prev.BlockHash, msg.BlockHash) {
		return // same vote, not equivocation
	}
	// Equivocation detected — but suppress duplicates: if we already hold
	// evidence for this exact conflicting vote, a replay adds nothing.
	// pendingEvidence stays small (drained by TakeEvidence), so the linear
	// scan is cheap.
	for _, ev := range e.pendingEvidence {
		if ev.B.From == msg.From && ev.B.Type == msg.Type &&
			ev.B.View == msg.View && ev.B.SeqNum == msg.SeqNum &&
			bytesEqualBlockHash(ev.B.BlockHash, msg.BlockHash) {
			return
		}
	}
	log.Printf("[PBFT] EQUIVOCATION: %s signed two %v at view=%d seq=%d (blocks %x vs %x)",
		shortKey(msg.From), msg.Type, msg.View, msg.SeqNum,
		prev.BlockHash[:min(4, len(prev.BlockHash))],
		msg.BlockHash[:min(4, len(msg.BlockHash))])
	msgCopy := *msg
	e.pendingEvidence = append(e.pendingEvidence, blockchain.EquivocationEvidence{
		A: prev,
		B: &msgCopy,
	})
}
// TakeEvidence atomically removes and returns all equivocation evidence
// collected so far, or nil if there is none. The caller decides what to do
// with it (typically: wrap each entry into a SLASH tx and submit). Safe to
// call concurrently.
func (e *Engine) TakeEvidence() []blockchain.EquivocationEvidence {
	e.evidenceMu.Lock()
	defer e.evidenceMu.Unlock()
	evidence := e.pendingEvidence
	if len(evidence) == 0 {
		return nil
	}
	e.pendingEvidence = nil
	return evidence
}
// pruneOldVotes drops every seenVotes entry whose seqNum is strictly below
// belowSeq, bounding the equivocation-detection map. Invoked after each
// commit.
func (e *Engine) pruneOldVotes(belowSeq uint64) {
	e.evidenceMu.Lock()
	defer e.evidenceMu.Unlock()
	for key := range e.seenVotes {
		if key.seqNum >= belowSeq {
			continue
		}
		delete(e.seenVotes, key)
	}
}
// bytesEqualBlockHash reports whether two block hashes are byte-identical.
// Uses stdlib bytes.Equal instead of a hand-rolled loop; semantics are
// unchanged (nil and empty compare equal, as before).
func bytesEqualBlockHash(a, b []byte) bool {
	return bytes.Equal(a, b)
}
// min returns the smaller of two ints. Kept as a local helper (the file may
// predate the Go 1.21 builtin); shadowing the builtin is harmless.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// MissedBlocks returns how many seqNums have passed since the given
// validator last contributed a commit vote. A validator we have never seen
// yields the current seqNum — the caller decides how to interpret that.
//
// Thread-safe; may be polled from a metrics/reporting goroutine.
func (e *Engine) MissedBlocks(pubKey string) uint64 {
	e.livenessMu.RLock()
	lastSeen, tracked := e.liveness[pubKey]
	e.livenessMu.RUnlock()

	e.mu.Lock()
	cur := e.seqNum
	e.mu.Unlock()

	switch {
	case !tracked:
		return cur
	case lastSeen >= cur:
		return 0
	default:
		return cur - lastSeen
	}
}
// LivenessReport returns a (validator → missed-block count) snapshot for
// the full current validator set. Intended for the /metrics endpoint and
// ops dashboards.
func (e *Engine) LivenessReport() map[string]uint64 {
	// Snapshot the set under the lock; MissedBlocks takes its own locks.
	e.mu.Lock()
	snapshot := append([]string(nil), e.validators...)
	e.mu.Unlock()

	report := make(map[string]uint64, len(snapshot))
	for _, pub := range snapshot {
		report[pub] = e.MissedBlocks(pub)
	}
	return report
}
// noteLiveness records that pubKey contributed a commit vote at seq.
// Monotonic: an older seq never overwrites a newer one.
func (e *Engine) noteLiveness(pubKey string, seq uint64) {
	e.livenessMu.Lock()
	defer e.livenessMu.Unlock()
	if e.liveness[pubKey] < seq {
		e.liveness[pubKey] = seq
	}
}
// voteKey uniquely identifies a (validator, phase, round) tuple. Two
// messages sharing a voteKey but with different BlockHash are equivocation.
// All fields are comparable, so voteKey is usable as the seenVotes map key.
type voteKey struct {
	from   string
	typ    blockchain.MsgType
	view   uint64
	seqNum uint64
}
// MaxTxsPerBlock caps how many transactions one proposal pulls from the
// mempool. Keeps block commit time bounded regardless of pending backlog.
// Combined with the round-robin drain, this also caps how many txs a
// single sender can get into one block to `ceil(MaxTxsPerBlock / senders)`.
const MaxTxsPerBlock = 200
// UpdateValidators hot-reloads the validator set. Safe to call concurrently.
// The new set takes effect on the next round (the current in-flight round is
// not affected).
func (e *Engine) UpdateValidators(validators []string) {
	e.mu.Lock()
	e.validators = validators
	count := len(validators)
	e.mu.Unlock()
	log.Printf("[PBFT] validator set updated: %d validators", count)
}
// SyncSeqNum updates the engine's expected next block index after a chain
// sync. Moving backwards is ignored.
func (e *Engine) SyncSeqNum(next uint64) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if next <= e.seqNum {
		return // already at or past the synced tip
	}
	e.seqNum = next
	e.phase = phaseNone
	// Rescue in-flight txs before discarding the stale proposal.
	e.reclaimProposal()
	e.proposal = nil
}
// AddTransaction validates and adds a tx to the pending mempool.
// Returns an error if the tx is invalid or a duplicate; the tx is silently
// dropped in both cases so callers can safely ignore the return value when
// forwarding gossip from untrusted peers.
func (e *Engine) AddTransaction(tx *blockchain.Transaction) error {
	if err := validateTx(tx); err != nil {
		return err
	}
	e.pendingMu.Lock()
	defer e.pendingMu.Unlock()
	// O(1) dedup across all per-sender queues.
	if _, dup := e.seenIDs[tx.ID]; dup {
		return fmt.Errorf("duplicate tx: %s", tx.ID)
	}
	// Route by sender. A first tx from a new address registers it in the
	// round-robin iteration order so later senders don't starve earlier ones.
	queue, known := e.senderQueues[tx.From]
	if !known {
		e.senderOrder = append(e.senderOrder, tx.From)
	}
	e.senderQueues[tx.From] = append(queue, tx)
	e.seenIDs[tx.ID] = struct{}{}
	return nil
}
// validateTx performs stateless transaction validation:
//   - required fields present
//   - fee at or above MinFee
//   - Ed25519 signature valid over canonical bytes
func validateTx(tx *blockchain.Transaction) error {
	switch {
	case tx == nil:
		return fmt.Errorf("nil transaction")
	case tx.ID == "" || tx.From == "" || tx.Type == "":
		return fmt.Errorf("tx missing required fields (id/from/type)")
	case tx.Fee < blockchain.MinFee:
		return fmt.Errorf("tx fee %d < MinFee %d", tx.Fee, blockchain.MinFee)
	}
	// Delegate signature verification to identity.VerifyTx so that the
	// canonical signing bytes are defined in exactly one place.
	if err := identity.VerifyTx(tx); err != nil {
		return fmt.Errorf("tx signature invalid: %w", err)
	}
	return nil
}
// requeueHead puts txs back at the FRONT of their sender's FIFO. Used when
// Propose aborted after draining (chain tip advanced under us) so the txs
// don't get moved to the back of the line through no fault of the sender.
// Preserves per-sender ordering within the given slice.
func (e *Engine) requeueHead(txs []*blockchain.Transaction) {
	if len(txs) == 0 {
		return
	}
	// Bucket by sender first so each group can be prepended as a unit.
	grouped := make(map[string][]*blockchain.Transaction)
	var senders []string
	for _, tx := range txs {
		if _, present := grouped[tx.From]; !present {
			senders = append(senders, tx.From)
		}
		grouped[tx.From] = append(grouped[tx.From], tx)
	}
	e.pendingMu.Lock()
	defer e.pendingMu.Unlock()
	for _, from := range senders {
		if _, tracked := e.senderQueues[from]; !tracked {
			e.senderOrder = append(e.senderOrder, from)
		}
		// Prepend: rescued group goes ahead of whatever is queued now.
		e.senderQueues[from] = append(grouped[from], e.senderQueues[from]...)
	}
}
// requeueTail puts txs at the BACK of their sender's FIFO, skipping any
// that are already seen. Used on view-change rescue — the tx has been in
// flight for a while and fairness dictates it shouldn't jump ahead of txs
// that arrived since. Returns the count actually requeued.
func (e *Engine) requeueTail(txs []*blockchain.Transaction) int {
	e.pendingMu.Lock()
	defer e.pendingMu.Unlock()
	count := 0
	for _, tx := range txs {
		if _, dup := e.seenIDs[tx.ID]; dup {
			continue // still tracked → already queued elsewhere
		}
		if _, known := e.senderQueues[tx.From]; !known {
			e.senderOrder = append(e.senderOrder, tx.From)
		}
		e.senderQueues[tx.From] = append(e.senderQueues[tx.From], tx)
		e.seenIDs[tx.ID] = struct{}{}
		count++
	}
	return count
}
// HasPendingTxs reports whether there are uncommitted transactions in the
// mempool. Used by the block-production loop to skip proposals when there
// is nothing to commit.
func (e *Engine) HasPendingTxs() bool {
	e.pendingMu.Lock()
	defer e.pendingMu.Unlock()
	for _, queue := range e.senderQueues {
		if len(queue) != 0 {
			return true
		}
	}
	return false
}
// PruneTxs removes transactions that were committed in a block from the
// pending mempool. Must be called by the onCommit handler so that
// non-proposing validators don't re-propose transactions they received via
// gossip but didn't drain themselves.
func (e *Engine) PruneTxs(txs []*blockchain.Transaction) {
	if len(txs) == 0 {
		return
	}
	committed := make(map[string]bool, len(txs))
	for _, tx := range txs {
		committed[tx.ID] = true
	}
	e.pendingMu.Lock()
	defer e.pendingMu.Unlock()
	// In-place filter per sender queue; committed IDs also leave seenIDs.
	for from, queue := range e.senderQueues {
		remaining := queue[:0]
		for _, tx := range queue {
			if committed[tx.ID] {
				delete(e.seenIDs, tx.ID)
				continue
			}
			remaining = append(remaining, tx)
		}
		if len(remaining) > 0 {
			e.senderQueues[from] = remaining
		} else {
			delete(e.senderQueues, from)
		}
	}
	// Drop now-empty senders from the round-robin order so iteration stays
	// O(active senders).
	if len(e.senderOrder) == 0 {
		return
	}
	alive := e.senderOrder[:0]
	for _, from := range e.senderOrder {
		if _, still := e.senderQueues[from]; still {
			alive = append(alive, from)
		}
	}
	e.senderOrder = alive
}
// IsLeader returns true if this node is leader for the current round.
// Leadership rotates: leader = validators[(seqNum + view) % n]
//
// NOTE(review): reads e.seqNum/e.view/e.validators without taking e.mu.
// The internal caller (Propose) already holds the lock — taking it here
// would deadlock — so external callers must either hold e.mu themselves
// or accept a racy read. Confirm call sites outside this file.
func (e *Engine) IsLeader() bool {
	if len(e.validators) == 0 {
		return false
	}
	idx := int(e.seqNum+e.view) % len(e.validators)
	return e.validators[idx] == e.id.PubKeyHex()
}
// Propose builds a block from the mempool and broadcasts a PRE-PREPARE
// message. Only the current leader calls this; it is a no-op when this node
// is not leader or a round is already in flight.
//
// Lock order: e.mu is held for the whole call; e.pendingMu is taken and
// released for the drain only (never nested the other way around).
func (e *Engine) Propose(prevBlock *blockchain.Block) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if !e.IsLeader() || e.phase != phaseNone {
		return
	}
	// Round-robin drain: take one tx from each sender's FIFO per pass,
	// up to MaxTxsPerBlock. Guarantees that a spammer's 10k-tx queue can
	// not starve a legitimate user who has just one tx pending.
	e.pendingMu.Lock()
	txs := make([]*blockchain.Transaction, 0, MaxTxsPerBlock)
	for len(txs) < MaxTxsPerBlock {
		drained := 0
		for _, sender := range e.senderOrder {
			q := e.senderQueues[sender]
			if len(q) == 0 {
				continue
			}
			txs = append(txs, q[0])
			// Pop the head of this sender's queue.
			q = q[1:]
			if len(q) == 0 {
				delete(e.senderQueues, sender)
			} else {
				e.senderQueues[sender] = q
			}
			drained++
			if len(txs) >= MaxTxsPerBlock {
				break
			}
		}
		if drained == 0 {
			break // no queues had any tx this pass → done
		}
	}
	// Rebuild senderOrder keeping only senders who still have pending txs,
	// so iteration cost stays O(active senders) on next round.
	if len(e.senderOrder) > 0 {
		keep := e.senderOrder[:0]
		for _, s := range e.senderOrder {
			if _, ok := e.senderQueues[s]; ok {
				keep = append(keep, s)
			}
		}
		e.senderOrder = keep
	}
	// seenIDs is left intact — those txs are now in-flight; PruneTxs in
	// the commit callback will clear them once accepted.
	e.pendingMu.Unlock()
	var prevHash []byte
	var idx uint64
	if prevBlock != nil {
		prevHash = prevBlock.Hash
		idx = prevBlock.Index + 1
	}
	if idx != e.seqNum {
		// Chain tip doesn't match our expected seqNum — return txs to mempool and wait for sync.
		// requeueHead puts them back at the front of each sender's FIFO.
		e.requeueHead(txs)
		return
	}
	// Total fees are recorded in the block header alongside the txs.
	var totalFees uint64
	for _, tx := range txs {
		totalFees += tx.Fee
	}
	b := &blockchain.Block{
		Index:        idx,
		Timestamp:    time.Now().UTC(),
		Transactions: txs,
		PrevHash:     prevHash,
		Validator:    e.id.PubKeyHex(),
		TotalFees:    totalFees,
	}
	b.ComputeHash()
	b.Sign(e.id.PrivKey)
	e.proposal = b
	e.prepareVotes = make(map[string]bool)
	e.commitVotes = make(map[string]bool)
	// Broadcast PRE-PREPARE
	e.send(e.signMsg(&blockchain.ConsensusMsg{
		Type:      blockchain.MsgPrePrepare,
		View:      e.view,
		SeqNum:    b.Index,
		BlockHash: b.Hash,
		Block:     b,
	}))
	log.Printf("[PBFT] leader %s proposed block #%d hash=%s",
		shortKey(e.id.PubKeyHex()), b.Index, b.HashHex()[:8])
	if e.hookPropose != nil {
		go e.hookPropose()
	}
	// Leader casts its own PREPARE vote immediately
	e.castPrepare()
	e.resetTimer()
}
// HandleMessage processes an incoming ConsensusMsg from a peer. Messages
// with bad signatures or from unknown validators are dropped.
func (e *Engine) HandleMessage(msg *blockchain.ConsensusMsg) {
	if err := e.verifyMsgSig(msg); err != nil {
		log.Printf("[PBFT] bad sig from %s: %v", shortKey(msg.From), err)
		return
	}
	if !e.isKnownValidator(msg.From) {
		return // not part of the current validator set
	}
	e.mu.Lock()
	defer e.mu.Unlock()
	// Dispatch to the phase handler; unknown types are ignored.
	var handle func(*blockchain.ConsensusMsg)
	switch msg.Type {
	case blockchain.MsgPrePrepare:
		handle = e.handlePrePrepare
	case blockchain.MsgPrepare:
		handle = e.handlePrepare
	case blockchain.MsgCommit:
		handle = e.handleCommit
	case blockchain.MsgViewChange:
		handle = e.handleViewChange
	}
	if handle != nil {
		handle(msg)
	}
}
// --- phase handlers ---
// handlePrePrepare accepts a leader's proposal for exactly the round we are
// waiting on and casts our PREPARE vote. Must be called with e.mu held.
func (e *Engine) handlePrePrepare(msg *blockchain.ConsensusMsg) {
	if msg.View != e.view || msg.SeqNum != e.seqNum ||
		e.phase != phaseNone || msg.Block == nil {
		return
	}
	// Recompute the hash locally; never trust the advertised one.
	msg.Block.ComputeHash()
	if !hashEqual(msg.Block.Hash, msg.BlockHash) {
		log.Printf("[PBFT] PRE-PREPARE: block hash mismatch")
		return
	}
	e.proposal = msg.Block
	e.prepareVotes = map[string]bool{}
	e.commitVotes = map[string]bool{}
	log.Printf("[PBFT] %s accepted PRE-PREPARE for block #%d",
		shortKey(e.id.PubKeyHex()), msg.SeqNum)
	e.castPrepare()
	e.resetTimer()
}
// castPrepare records our own PREPARE vote, broadcasts it, and advances to
// the COMMIT phase if quorum is already met. Must be called with e.mu held.
func (e *Engine) castPrepare() {
	e.phase = phasePrepare
	own := e.id.PubKeyHex()
	e.prepareVotes[own] = true
	if hook := e.hookVote; hook != nil {
		go hook()
	}
	vote := &blockchain.ConsensusMsg{
		Type:      blockchain.MsgPrepare,
		View:      e.view,
		SeqNum:    e.proposal.Index,
		BlockHash: e.proposal.Hash,
	}
	e.send(e.signMsg(vote))
	if e.quorum(len(e.prepareVotes)) {
		e.advanceToCommit()
	}
}
// handlePrepare tallies a peer's PREPARE vote and advances to COMMIT when
// quorum is reached. Must be called with e.mu held.
func (e *Engine) handlePrepare(msg *blockchain.ConsensusMsg) {
	// Equivocation check runs BEFORE the view/proposal filter — we want to
	// catch votes for a different block even if we've already moved on.
	e.recordVote(msg)
	if msg.View != e.view || e.proposal == nil ||
		!hashEqual(msg.BlockHash, e.proposal.Hash) {
		return
	}
	e.prepareVotes[msg.From] = true
	if e.phase != phasePrepare {
		return // already past PREPARE; vote recorded for the tally only
	}
	if e.quorum(len(e.prepareVotes)) {
		e.advanceToCommit()
	}
}
// advanceToCommit transitions to the COMMIT phase, casts our own COMMIT
// vote and checks for immediate finalisation. Must be called with e.mu held.
func (e *Engine) advanceToCommit() {
	e.phase = phaseCommit
	own := e.id.PubKeyHex()
	e.commitVotes[own] = true
	// Self-liveness: we're about to broadcast COMMIT, so count ourselves as
	// participating in this seqNum. Without this our own pubkey would always
	// show "missed blocks = current seqNum" in LivenessReport.
	e.noteLiveness(own, e.seqNum)
	if hook := e.hookVote; hook != nil {
		go hook()
	}
	e.send(e.signMsg(&blockchain.ConsensusMsg{
		Type:      blockchain.MsgCommit,
		View:      e.view,
		SeqNum:    e.proposal.Index,
		BlockHash: e.proposal.Hash,
	}))
	log.Printf("[PBFT] %s sent COMMIT for block #%d (prepare quorum %d/%d)",
		shortKey(own), e.proposal.Index,
		len(e.prepareVotes), len(e.validators))
	e.tryFinalize()
}
// handleCommit tallies a peer's COMMIT vote, updates its liveness record and
// attempts finalisation. Must be called with e.mu held.
func (e *Engine) handleCommit(msg *blockchain.ConsensusMsg) {
	e.recordVote(msg) // equivocation check runs unconditionally
	if msg.View != e.view || e.proposal == nil ||
		!hashEqual(msg.BlockHash, e.proposal.Hash) {
		return
	}
	e.commitVotes[msg.From] = true
	// msg.SeqNum reflects the block being committed; record the sender as
	// live at exactly that height.
	e.noteLiveness(msg.From, msg.SeqNum)
	e.tryFinalize()
}
// tryFinalize commits the in-flight block once the COMMIT quorum is
// reached; otherwise it does nothing. Must be called with e.mu held.
func (e *Engine) tryFinalize() {
	if e.phase != phaseCommit {
		return
	}
	if !e.quorum(len(e.commitVotes)) {
		return
	}
	blk := e.proposal
	e.proposal = nil
	e.phase = phaseNone
	e.seqNum++
	// Drop recorded votes for previous seqNums so the equivocation-detection
	// map doesn't grow unboundedly. The current seqNum is kept in case a
	// late duplicate arrives.
	e.pruneOldVotes(e.seqNum)
	if e.timer != nil {
		e.timer.Stop()
	}
	log.Printf("[PBFT] COMMITTED block #%d hash=%s validator=%s fees=%d µT (commit votes %d/%d)",
		blk.Index, blk.HashHex()[:8],
		shortKey(blk.Validator),
		blk.TotalFees,
		len(e.commitVotes), len(e.validators))
	go e.onCommit(blk) // never run the callback while holding e.mu
}
// handleViewChange adopts a strictly newer view and abandons the current
// round, rescuing its txs first. Must be called with e.mu held.
func (e *Engine) handleViewChange(msg *blockchain.ConsensusMsg) {
	if msg.View <= e.view {
		return // stale or duplicate view-change
	}
	e.view = msg.View
	e.phase = phaseNone
	e.reclaimProposal() // rescue txs before dropping the proposal
	e.proposal = nil
	log.Printf("[PBFT] view-change to view %d (new leader: %s)",
		e.view, shortKey(e.currentLeader()))
}
// reclaimProposal moves transactions from the in-flight proposal back into
// the pending mempool so they are not permanently lost on a view-change or
// timeout. Must be called with e.mu held; acquires pendingMu internally.
func (e *Engine) reclaimProposal() {
	p := e.proposal
	if p == nil || len(p.Transactions) == 0 {
		return
	}
	// requeueTail keeps the per-sender FIFO + dedup invariants and skips txs
	// still marked in-flight. Tail insertion loses some ordering, but HEAD
	// insertion would let rescued txs starve everything that arrived since.
	if n := e.requeueTail(p.Transactions); n > 0 {
		log.Printf("[PBFT] reclaimed %d tx(s) from abandoned proposal #%d back to mempool",
			n, p.Index)
	}
}
// --- helpers ---
// quorum reports whether count votes form a BFT supermajority (⌈2n/3⌉) of
// the current validator set. An empty set can never reach quorum.
func (e *Engine) quorum(count int) bool {
	total := len(e.validators)
	if total == 0 {
		return false
	}
	needed := (2*total + 2) / 3 // integer form of ⌈2n/3⌉
	return count >= needed
}
// currentLeader returns the pubkey of the round's leader, or "" when the
// validator set is empty. Rotation covers both seqNum and view.
func (e *Engine) currentLeader() string {
	n := len(e.validators)
	if n == 0 {
		return ""
	}
	return e.validators[int(e.seqNum+e.view)%n]
}
// isKnownValidator reports whether pubKeyHex belongs to the current
// validator set (linear scan; the set is small).
func (e *Engine) isKnownValidator(pubKeyHex string) bool {
	for i := range e.validators {
		if e.validators[i] == pubKeyHex {
			return true
		}
	}
	return false
}
// resetTimer (re)arms the round timeout. When it fires with a round still in
// flight, the engine either silently retries in the same view (no peer votes
// at all — the peer is probably just not connected yet) or triggers a real
// view-change (peers voted but quorum never arrived).
// Must be called with e.mu held; the timer callback takes e.mu itself.
func (e *Engine) resetTimer() {
	if e.timer != nil {
		e.timer.Stop()
	}
	e.timer = time.AfterFunc(blockTimeout, func() {
		e.mu.Lock()
		defer e.mu.Unlock()
		if e.phase == phaseNone {
			// Round already finished (commit stopped a fired-but-queued timer).
			return
		}
		// Count votes from OTHER validators (not ourselves).
		// If we received zero foreign votes the peer is simply not connected yet —
		// advancing the view would desync us (we'd be in view N+1, the peer in view 0).
		// Instead: silently reset and let the next proposal tick retry in the same view.
		otherVotes := 0
		ownKey := e.id.PubKeyHex()
		if e.phase == phasePrepare {
			for k := range e.prepareVotes {
				if k != ownKey {
					otherVotes++
				}
			}
		} else { // phaseCommit
			for k := range e.commitVotes {
				if k != ownKey {
					otherVotes++
				}
			}
		}
		if otherVotes == 0 {
			// No peer participation — peer is offline/not yet connected.
			// Reset without a view-change so both sides stay in view 0
			// and can agree as soon as the peer comes up.
			log.Printf("[PBFT] timeout in view %d seq %d — no peer votes, retrying in same view",
				e.view, e.seqNum)
			e.phase = phaseNone
			e.reclaimProposal()
			e.proposal = nil
			return
		}
		// Got votes from at least one peer but still timed out — real view-change.
		log.Printf("[PBFT] timeout in view %d seq %d — triggering view-change",
			e.view, e.seqNum)
		e.view++
		e.phase = phaseNone
		e.reclaimProposal()
		e.proposal = nil
		if e.hookViewChange != nil {
			go e.hookViewChange()
		}
		e.send(e.signMsg(&blockchain.ConsensusMsg{
			Type:   blockchain.MsgViewChange,
			View:   e.view,
			SeqNum: e.seqNum,
		}))
	})
}
// signMsg stamps the message with our pubkey and signs the canonical digest
// in place, returning the same pointer for call-chaining into e.send.
func (e *Engine) signMsg(m *blockchain.ConsensusMsg) *blockchain.ConsensusMsg {
	m.From = e.id.PubKeyHex()
	m.Signature = e.id.Sign(msgSignBytes(m))
	return m
}
// verifyMsgSig checks the message signature against its canonical digest.
// The signature field is stripped for digest computation and then restored.
func (e *Engine) verifyMsgSig(msg *blockchain.ConsensusMsg) error {
	sig := msg.Signature
	msg.Signature = nil
	digest := msgSignBytes(msg)
	msg.Signature = sig

	valid, err := identity.Verify(msg.From, digest, sig)
	if err != nil {
		return err
	}
	if !valid {
		return fmt.Errorf("invalid signature")
	}
	return nil
}
// msgSignBytes returns the SHA-256 digest of the message's canonical JSON
// form, with the signature and block pointer zeroed (BlockHash already
// commits to the block content).
func msgSignBytes(msg *blockchain.ConsensusMsg) []byte {
	clone := *msg
	clone.Signature = nil
	clone.Block = nil
	raw, _ := json.Marshal(clone) // marshalling a plain struct cannot fail here
	digest := sha256.Sum256(raw)
	return digest[:]
}
// hashEqual reports whether two hashes are byte-identical.
// NOTE(review): round-tripping through hex allocates two strings per call
// on the consensus hot path; bytes.Equal would be allocation-free, but this
// is the file's only use of encoding/hex, so switching requires an import
// change too.
func hashEqual(a, b []byte) bool {
	return hex.EncodeToString(a) == hex.EncodeToString(b)
}
// shortKey truncates a hex pubkey to its first 8 characters for log output;
// shorter inputs are returned unchanged.
func shortKey(h string) string {
	if len(h) <= 8 {
		return h
	}
	return h[:8]
}