DChain single-node blockchain + React Native messenger client. Core: - PBFT consensus with multi-sig validator admission + equivocation slashing - BadgerDB + schema migration scaffold (CurrentSchemaVersion=0) - libp2p gossipsub (tx/v1, blocks/v1, relay/v1, version/v1) - Native Go contracts (username_registry) alongside WASM (wazero) - WebSocket gateway with topic-based fanout + Ed25519-nonce auth - Relay mailbox with NaCl envelope encryption (X25519 + Ed25519) - Prometheus /metrics, per-IP rate limit, body-size cap Deployment: - Single-node compose (deploy/single/) with Caddy TLS + optional Prometheus - 3-node dev compose (docker-compose.yml) with mocked internet topology - 3-validator prod compose (deploy/prod/) for federation - Auto-update from Gitea via /api/update-check + systemd timer - Build-time version injection (ldflags → node --version) - UI / Swagger toggle flags (DCHAIN_DISABLE_UI, DCHAIN_DISABLE_SWAGGER) Client (client-app/): - Expo / React Native / NativeWind - E2E NaCl encryption, typing indicator, contact requests - Auto-discovery of canonical contracts, chain_id aware, WS reconnect on node switch Documentation: - README.md, CHANGELOG.md, CONTEXT.md - deploy/single/README.md with 6 operator scenarios - deploy/UPDATE_STRATEGY.md with 4-layer forward-compat design - docs/contracts/*.md per contract
563 lines
15 KiB
Go
563 lines
15 KiB
Go
package blockchain
|
|
|
|
import (
|
|
"crypto/sha256"
|
|
"encoding/hex"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
badger "github.com/dgraph-io/badger/v4"
|
|
)
|
|
|
|
// Index key prefixes for the Badger keyspace owned by this file.
const (
	prefixTxRecord = "tx:"      // tx:<txid> → TxRecord JSON
	prefixTxByAddr = "txaddr:"  // txaddr:<pubkey>:<block020d>:<txid> → "" (empty value)
	prefixAddrMap  = "addrmap:" // addrmap:<DCaddr> → pubkey hex
	prefixNetStats = "netstats" // netstats → NetStats JSON

	// Synthetic BLOCK_REWARD transaction IDs: "sys-reward-" + 20-digit block index.
	// See makeBlockRewardTx / parseSyntheticRewardIndex.
	syntheticRewardIDPrefix = "sys-reward-"

	// NOTE(review): prefixTxChron (chronological tx index), prefixValidator and
	// prefixRelay are referenced in this file but declared elsewhere in the
	// package — confirm they remain consistent with the prefixes above.
)
|
|
|
|
// TxRecord wraps a Transaction with its on-chain context. It is the unit
// stored under prefixTxRecord and returned by the public query methods
// (TxByID, TxsByAddress, RecentTxs).
type TxRecord struct {
	Tx *Transaction `json:"tx"` // the transaction itself

	BlockIndex uint64 `json:"block_index"` // index of the block that included the tx

	BlockHash string `json:"block_hash"` // hex hash of that block

	BlockTime time.Time `json:"block_time"` // timestamp of that block

	// GasUsed is populated for contract calls; zero (and omitted from JSON)
	// for transactions that consume no gas.
	GasUsed uint64 `json:"gas_used,omitempty"`
}
|
|
|
|
// NetStats are aggregate counters updated every block (persisted under
// prefixNetStats by writeNetStats, inside the same transaction as indexBlock).
type NetStats struct {
	TotalBlocks uint64 `json:"total_blocks"` // chain height + 1 (tip index + 1)

	TotalTxs uint64 `json:"total_txs"` // count of unique indexed transactions

	TotalTransfers uint64 `json:"total_transfers"` // subset of TotalTxs with type EventTransfer

	TotalRelayProofs uint64 `json:"total_relay_proofs"` // subset of TotalTxs with type EventRelayProof

	TotalSupply uint64 `json:"total_supply"` // µT ever minted via rewards + grants

	// ValidatorCount and RelayCount are recomputed live by NetworkStats();
	// the persisted values are not maintained by indexBlock.
	ValidatorCount int `json:"validator_count"`

	RelayCount int `json:"relay_count"`
}
|
|
|
|
// indexBlock is called inside AddBlock's db.Update() — indexes all transactions
// in the block and updates aggregate stats. All writes go through the supplied
// txn so the index stays atomic with the block commit.
// gasUsed maps tx.ID → gas consumed for CALL_CONTRACT transactions.
func (c *Chain) indexBlock(txn *badger.Txn, b *Block, gasUsed map[string]uint64) error {
	// Load existing stats; a missing key yields the zero value (see readNetStats).
	stats, err := c.readNetStats(txn)
	if err != nil {
		return err
	}
	stats.TotalBlocks = b.Index + 1
	// TotalSupply is fixed at GenesisAllocation; update it once at genesis.
	if b.Index == 0 {
		stats.TotalSupply = GenesisAllocation
	}

	for seq, tx := range b.Transactions {
		// Store full TxRecord — but never overwrite an existing record.
		// The same TX can appear in multiple gossiped blocks due to a mempool/PBFT
		// race; the first block that actually applies it (via applyTx) will have
		// gasUsed > 0. Subsequent re-indexings with an empty gasUsedByTx map
		// would zero out the stored GasUsed. Skip if the record already exists.
		// (The continue also skips the stats counters below, so duplicates are
		// never double-counted.)
		recKey := []byte(prefixTxRecord + tx.ID)
		if _, existErr := txn.Get(recKey); existErr == nil {
			// TxRecord already written (from an earlier block or earlier call);
			// do not overwrite it.
			continue
		}
		// Chronological index entry (txchron:<block20d>:<seq04d> → tx_id).
		// Lets RecentTxs iterate tx-by-tx instead of block-by-block so chains
		// with many empty blocks still answer /api/txs/recent in O(limit).
		chronKey := fmt.Sprintf("%s%020d:%04d", prefixTxChron, b.Index, seq)
		if err := txn.Set([]byte(chronKey), []byte(tx.ID)); err != nil {
			return err
		}
		// Missing map entries yield 0, which json-omits GasUsed (omitempty).
		gasForTx := gasUsed[tx.ID]
		rec := TxRecord{
			Tx:         tx,
			BlockIndex: b.Index,
			BlockHash:  b.HashHex(),
			BlockTime:  b.Timestamp,
			GasUsed:    gasForTx,
		}
		val, err := json.Marshal(rec)
		if err != nil {
			return err
		}
		if err := txn.Set(recKey, val); err != nil {
			return err
		}

		// Index by sender (key-only entry; the txid is embedded in the key).
		if tx.From != "" {
			addrKey := txAddrKey(tx.From, b.Index, tx.ID)
			if err := txn.Set([]byte(addrKey), []byte{}); err != nil {
				return err
			}
			// Store addr → pubkey mapping
			if err := c.storeAddrMap(txn, tx.From); err != nil {
				return err
			}
		}
		// Index by recipient (skip self-transfers to avoid a duplicate key).
		if tx.To != "" && tx.To != tx.From {
			addrKey := txAddrKey(tx.To, b.Index, tx.ID)
			if err := txn.Set([]byte(addrKey), []byte{}); err != nil {
				return err
			}
			if err := c.storeAddrMap(txn, tx.To); err != nil {
				return err
			}
		}

		// Update aggregate counters
		stats.TotalTxs++
		switch tx.Type {
		case EventTransfer:
			stats.TotalTransfers++
		case EventRelayProof:
			stats.TotalRelayProofs++
		}
	}

	// Index synthetic block reward only when the validator actually earned fees,
	// or for the genesis block (one-time allocation). Empty blocks produce no
	// state change and no income, so there is nothing useful to show.
	// Reward records get no chronological index entry; RecentTxs reconstructs
	// them in its fallback block scan instead.
	if b.TotalFees > 0 || b.Index == 0 {
		rewardTarget, err := c.resolveRewardTarget(txn, b.Validator)
		if err != nil {
			return err
		}
		rewardTx, err := makeBlockRewardTx(b, rewardTarget)
		if err != nil {
			return err
		}
		rewardRec := TxRecord{
			Tx:         rewardTx,
			BlockIndex: b.Index,
			BlockHash:  b.HashHex(),
			BlockTime:  b.Timestamp,
		}
		rewardVal, err := json.Marshal(rewardRec)
		if err != nil {
			return err
		}
		// Reward IDs are deterministic per block (sys-reward-<idx>), so a
		// re-index simply overwrites the identical record.
		if err := txn.Set([]byte(prefixTxRecord+rewardTx.ID), rewardVal); err != nil {
			return err
		}
		// From is "" by construction (see makeBlockRewardTx), so this branch is
		// currently dead but kept for safety if reward semantics change.
		if rewardTx.From != "" {
			if err := txn.Set([]byte(txAddrKey(rewardTx.From, b.Index, rewardTx.ID)), []byte{}); err != nil {
				return err
			}
			if err := c.storeAddrMap(txn, rewardTx.From); err != nil {
				return err
			}
		}
		if rewardTx.To != "" && rewardTx.To != rewardTx.From {
			if err := txn.Set([]byte(txAddrKey(rewardTx.To, b.Index, rewardTx.ID)), []byte{}); err != nil {
				return err
			}
			if err := c.storeAddrMap(txn, rewardTx.To); err != nil {
				return err
			}
		}
	}

	// Persist updated stats
	return c.writeNetStats(txn, stats)
}
|
|
|
|
func makeBlockRewardTx(b *Block, rewardTarget string) (*Transaction, error) {
|
|
var memo string
|
|
if b.Index == 0 {
|
|
memo = fmt.Sprintf("Genesis allocation: %d µT", GenesisAllocation)
|
|
} else {
|
|
memo = fmt.Sprintf("Block fees: %d µT", b.TotalFees)
|
|
}
|
|
|
|
total := b.TotalFees
|
|
if b.Index == 0 {
|
|
total = GenesisAllocation
|
|
}
|
|
|
|
payload, err := json.Marshal(BlockRewardPayload{
|
|
ValidatorPubKey: b.Validator,
|
|
TargetPubKey: rewardTarget,
|
|
FeeReward: b.TotalFees,
|
|
TotalReward: total,
|
|
})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
// From is intentionally left empty: a block reward is a synthetic, freshly
|
|
// minted allocation (fees collected by the network) rather than a transfer
|
|
// from an actual account. Leaving From="" prevents the reward from appearing
|
|
// as "validator paid themselves" in the explorer/client when the validator
|
|
// has no separate wallet binding (rewardTarget == b.Validator).
|
|
// b.Validator is still recorded inside the payload (BlockRewardPayload).
|
|
return &Transaction{
|
|
ID: fmt.Sprintf("%s%020d", syntheticRewardIDPrefix, b.Index),
|
|
Type: EventBlockReward,
|
|
From: "",
|
|
To: rewardTarget,
|
|
Amount: total,
|
|
Fee: 0,
|
|
Memo: memo,
|
|
Payload: payload,
|
|
Timestamp: b.Timestamp,
|
|
}, nil
|
|
}
|
|
|
|
// txAddrKey builds the composite key: txaddr:<pubkey>:<block_020d>:<txid>
|
|
func txAddrKey(pubKey string, blockIdx uint64, txID string) string {
|
|
return fmt.Sprintf("%s%s:%020d:%s", prefixTxByAddr, pubKey, blockIdx, txID)
|
|
}
|
|
|
|
// storeAddrMap stores a DC address → pubkey mapping.
|
|
func (c *Chain) storeAddrMap(txn *badger.Txn, pubKey string) error {
|
|
addr := pubKeyToAddr(pubKey)
|
|
return txn.Set([]byte(prefixAddrMap+addr), []byte(pubKey))
|
|
}
|
|
|
|
// pubKeyToAddr converts a hex Ed25519 public key to a DC address:
// "DC" + first 12 bytes of SHA-256(rawKey), hex-encoded.
// Replicates wallet.PubKeyToAddress without importing the wallet package.
// Input that is not valid hex is returned unchanged as a best-effort fallback.
func pubKeyToAddr(pubKeyHex string) string {
	decoded, decodeErr := hex.DecodeString(pubKeyHex)
	if decodeErr != nil {
		// Not valid hex — use the input as-is rather than failing.
		return pubKeyHex
	}
	digest := sha256.Sum256(decoded)
	return fmt.Sprintf("DC%x", digest[:12])
}
|
|
|
|
// --- Public query methods ---
|
|
|
|
// TxByID returns a TxRecord by transaction ID.
|
|
func (c *Chain) TxByID(txID string) (*TxRecord, error) {
|
|
var rec TxRecord
|
|
err := c.db.View(func(txn *badger.Txn) error {
|
|
item, err := txn.Get([]byte(prefixTxRecord + txID))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return item.Value(func(val []byte) error {
|
|
return json.Unmarshal(val, &rec)
|
|
})
|
|
})
|
|
if errors.Is(err, badger.ErrKeyNotFound) {
|
|
synth, synthErr := c.syntheticTxByID(txID)
|
|
if synthErr != nil {
|
|
return nil, synthErr
|
|
}
|
|
if synth != nil {
|
|
return synth, nil
|
|
}
|
|
return nil, nil
|
|
}
|
|
return &rec, err
|
|
}
|
|
|
|
func parseSyntheticRewardIndex(txID string) (uint64, bool) {
|
|
if !strings.HasPrefix(txID, syntheticRewardIDPrefix) {
|
|
return 0, false
|
|
}
|
|
part := strings.TrimPrefix(txID, syntheticRewardIDPrefix)
|
|
idx, err := strconv.ParseUint(part, 10, 64)
|
|
if err != nil {
|
|
return 0, false
|
|
}
|
|
return idx, true
|
|
}
|
|
|
|
func (c *Chain) syntheticTxByID(txID string) (*TxRecord, error) {
|
|
idx, ok := parseSyntheticRewardIndex(txID)
|
|
if !ok {
|
|
return nil, nil
|
|
}
|
|
b, err := c.GetBlock(idx)
|
|
if errors.Is(err, badger.ErrKeyNotFound) {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
rewardTarget := b.Validator
|
|
binding, err := c.WalletBinding(b.Validator)
|
|
if err == nil && binding != "" {
|
|
rewardTarget = binding
|
|
}
|
|
rewardTx, err := makeBlockRewardTx(b, rewardTarget)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return &TxRecord{
|
|
Tx: rewardTx,
|
|
BlockIndex: b.Index,
|
|
BlockHash: b.HashHex(),
|
|
BlockTime: b.Timestamp,
|
|
}, nil
|
|
}
|
|
|
|
// TxsByAddress returns up to limit TxRecords for a public key, newest first,
// skipping the first offset results (for pagination). Non-positive limit
// defaults to 50; negative offset is clamped to 0.
func (c *Chain) TxsByAddress(pubKey string, limit, offset int) ([]*TxRecord, error) {
	if limit <= 0 {
		limit = 50
	}
	if offset < 0 {
		offset = 0
	}
	prefix := prefixTxByAddr + pubKey + ":"

	// First: collect TxID keys for this address (newest first via reverse iter),
	// skipping `offset` entries.
	var txIDs []string
	err := c.db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.Reverse = true
		// Key-only pass: the txid is embedded in the key, values are empty.
		opts.PrefetchValues = false
		it := txn.NewIterator(opts)
		defer it.Close()

		// 0xff sentinel sorts after every real key under this prefix, so the
		// reverse iterator starts at the address's newest (highest-block) entry.
		seekKey := prefix + "\xff\xff\xff\xff\xff\xff\xff\xff"
		skipped := 0
		for it.Seek([]byte(seekKey)); it.Valid(); it.Next() {
			key := string(it.Item().Key())
			// Reverse iteration has no prefix option; stop once we leave it.
			if !strings.HasPrefix(key, prefix) {
				break
			}
			// Key layout after the prefix: <block020d>:<txid>.
			parts := strings.SplitN(key[len(prefix):], ":", 2)
			if len(parts) != 2 {
				continue
			}
			if skipped < offset {
				skipped++
				continue
			}
			txIDs = append(txIDs, parts[1])
			if len(txIDs) >= limit {
				break
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Now fetch each TxRecord
	var records []*TxRecord
	err = c.db.View(func(txn *badger.Txn) error {
		for _, txID := range txIDs {
			item, err := txn.Get([]byte(prefixTxRecord + txID))
			// An index entry without a record (partial write, pruning) is
			// silently skipped rather than failing the whole page.
			if errors.Is(err, badger.ErrKeyNotFound) {
				continue
			}
			if err != nil {
				return err
			}
			var rec TxRecord
			if err := item.Value(func(val []byte) error {
				return json.Unmarshal(val, &rec)
			}); err != nil {
				return err
			}
			records = append(records, &rec)
		}
		return nil
	})
	return records, err
}
|
|
|
|
// RecentTxs returns the N most recent transactions across all blocks.
// Non-positive limit defaults to 20. Results come from the chronological
// index when available, topped up by a bounded reverse block scan for
// legacy blocks and synthetic reward transactions.
func (c *Chain) RecentTxs(limit int) ([]*TxRecord, error) {
	if limit <= 0 {
		limit = 20
	}
	// Primary path: iterate the chronological tx index in reverse. This is
	// O(limit) regardless of how many empty blocks sit between txs.
	var records []*TxRecord
	err := c.db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.Reverse = true
		opts.PrefetchValues = true
		it := txn.NewIterator(opts)
		defer it.Close()

		// Seek to the highest possible key under this prefix.
		seekKey := []byte(prefixTxChron + "\xff")
		for it.Seek(seekKey); it.ValidForPrefix([]byte(prefixTxChron)); it.Next() {
			if len(records) >= limit {
				break
			}
			// Chron entries store the txid as the value; resolve it to the
			// full TxRecord. Any malformed/missing entry is skipped, not fatal.
			var txID string
			err := it.Item().Value(func(v []byte) error {
				txID = string(v)
				return nil
			})
			if err != nil || txID == "" {
				continue
			}
			recItem, err := txn.Get([]byte(prefixTxRecord + txID))
			if err != nil {
				continue
			}
			var rec TxRecord
			if err := recItem.Value(func(v []byte) error { return json.Unmarshal(v, &rec) }); err != nil {
				continue
			}
			records = append(records, &rec)
		}
		return nil
	})
	if err == nil && len(records) >= limit {
		return records, nil
	}

	// Fallback (legacy + reward-tx injection): reverse-scan blocks.
	// Only blocks committed BEFORE the chronological index existed will be
	// found this way; we cap the scan so it can't hang.
	tipIdx := c.TipIndex()
	const maxBlockScan = 5000

	// Dedup against what the chron index already produced. Synthetic reward
	// txs never appear there (indexBlock writes no chron entry for them), so
	// they are only ever added once, below.
	seen := make(map[string]bool, len(records))
	for _, r := range records {
		seen[r.Tx.ID] = true
	}

	scanned := 0
	for idx := int64(tipIdx); idx >= 0 && len(records) < limit && scanned < maxBlockScan; idx-- {
		scanned++
		b, err := c.GetBlock(uint64(idx))
		if err != nil {
			break
		}
		// Walk each block's txs newest-first to preserve overall ordering.
		for i := len(b.Transactions) - 1; i >= 0 && len(records) < limit; i-- {
			tx := b.Transactions[i]
			if seen[tx.ID] {
				continue
			}
			records = append(records, &TxRecord{
				Tx:         tx,
				BlockIndex: b.Index,
				BlockHash:  b.HashHex(),
				BlockTime:  b.Timestamp,
			})
		}
		// Include BLOCK_REWARD only for fee-earning blocks and genesis.
		if len(records) < limit && (b.TotalFees > 0 || b.Index == 0) {
			rewardTarget := b.Validator
			if binding, err2 := c.WalletBinding(b.Validator); err2 == nil && binding != "" {
				rewardTarget = binding
			}
			if rewardTx, err2 := makeBlockRewardTx(b, rewardTarget); err2 == nil {
				records = append(records, &TxRecord{
					Tx:         rewardTx,
					BlockIndex: b.Index,
					BlockHash:  b.HashHex(),
					BlockTime:  b.Timestamp,
				})
			}
		}
	}
	return records, nil
}
|
|
|
|
// RecentBlocks returns the N most recent blocks (tip first).
|
|
func (c *Chain) RecentBlocks(limit int) ([]*Block, error) {
|
|
if limit <= 0 {
|
|
limit = 10
|
|
}
|
|
// Lock-free tip lookup so this endpoint never blocks on consensus work.
|
|
tipIdx := c.TipIndex()
|
|
var blocks []*Block
|
|
for idx := int64(tipIdx); idx >= 0 && len(blocks) < limit; idx-- {
|
|
b, err := c.GetBlock(uint64(idx))
|
|
if err != nil {
|
|
break
|
|
}
|
|
blocks = append(blocks, b)
|
|
}
|
|
return blocks, nil
|
|
}
|
|
|
|
// NetworkStats returns aggregate counters for the chain.
|
|
// ValidatorCount and RelayCount are always live-counted from the DB so they
|
|
// are accurate even after InitValidators replaced the set or relays registered.
|
|
func (c *Chain) NetworkStats() (NetStats, error) {
|
|
var stats NetStats
|
|
err := c.db.View(func(txn *badger.Txn) error {
|
|
s, err := c.readNetStats(txn)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
stats = s
|
|
|
|
opts := badger.DefaultIteratorOptions
|
|
opts.PrefetchValues = false
|
|
it := txn.NewIterator(opts)
|
|
defer it.Close()
|
|
|
|
vPrefix := []byte(prefixValidator)
|
|
for it.Seek(vPrefix); it.ValidForPrefix(vPrefix); it.Next() {
|
|
stats.ValidatorCount++
|
|
}
|
|
rPrefix := []byte(prefixRelay)
|
|
for it.Seek(rPrefix); it.ValidForPrefix(rPrefix); it.Next() {
|
|
stats.RelayCount++
|
|
}
|
|
return nil
|
|
})
|
|
return stats, err
|
|
}
|
|
|
|
// AddressToPubKey resolves a DC address to a pub key.
|
|
// Returns "" if not found.
|
|
func (c *Chain) AddressToPubKey(addr string) (string, error) {
|
|
var pubKey string
|
|
err := c.db.View(func(txn *badger.Txn) error {
|
|
item, err := txn.Get([]byte(prefixAddrMap + addr))
|
|
if errors.Is(err, badger.ErrKeyNotFound) {
|
|
return nil
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return item.Value(func(val []byte) error {
|
|
pubKey = string(val)
|
|
return nil
|
|
})
|
|
})
|
|
return pubKey, err
|
|
}
|
|
|
|
// --- internal ---
|
|
|
|
func (c *Chain) readNetStats(txn *badger.Txn) (NetStats, error) {
|
|
var s NetStats
|
|
item, err := txn.Get([]byte(prefixNetStats))
|
|
if errors.Is(err, badger.ErrKeyNotFound) {
|
|
return s, nil
|
|
}
|
|
if err != nil {
|
|
return s, err
|
|
}
|
|
err = item.Value(func(val []byte) error {
|
|
return json.Unmarshal(val, &s)
|
|
})
|
|
return s, err
|
|
}
|
|
|
|
func (c *Chain) writeNetStats(txn *badger.Txn, s NetStats) error {
|
|
val, err := json.Marshal(s)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return txn.Set([]byte(prefixNetStats), val)
|
|
}
|