DChain single-node blockchain + React Native messenger client. Core: - PBFT consensus with multi-sig validator admission + equivocation slashing - BadgerDB + schema migration scaffold (CurrentSchemaVersion=0) - libp2p gossipsub (tx/v1, blocks/v1, relay/v1, version/v1) - Native Go contracts (username_registry) alongside WASM (wazero) - WebSocket gateway with topic-based fanout + Ed25519-nonce auth - Relay mailbox with NaCl envelope encryption (X25519 + Ed25519) - Prometheus /metrics, per-IP rate limit, body-size cap Deployment: - Single-node compose (deploy/single/) with Caddy TLS + optional Prometheus - 3-node dev compose (docker-compose.yml) with mocked internet topology - 3-validator prod compose (deploy/prod/) for federation - Auto-update from Gitea via /api/update-check + systemd timer - Build-time version injection (ldflags → node --version) - UI / Swagger toggle flags (DCHAIN_DISABLE_UI, DCHAIN_DISABLE_SWAGGER) Client (client-app/): - Expo / React Native / NativeWind - E2E NaCl encryption, typing indicator, contact requests - Auto-discovery of canonical contracts, chain_id aware, WS reconnect on node switch Documentation: - README.md, CHANGELOG.md, CONTEXT.md - deploy/single/README.md with 6 operator scenarios - deploy/UPDATE_STRATEGY.md with 4-layer forward-compat design - docs/contracts/*.md per contract
198 lines · 7.0 KiB · Go
// Package blockchain — BadgerDB schema version tracking + migration scaffold.
|
|
//
|
|
// Why this exists
|
|
// ───────────────
|
|
// The chain's on-disk layout is a flat KV store with string-prefixed keys
|
|
// (see chain.go: prefixBalance, prefixChannel, etc.). Every breaking change
|
|
// to those prefixes or value shapes would otherwise require operators to
|
|
// wipe their volume and re-sync from scratch. That's painful at 10 nodes;
|
|
// catastrophic at 1000.
|
|
//
|
|
// This file introduces a single meta-key — `schema:ver` → uint32 — that
|
|
// records the layout version the data was written in. On every chain open:
|
|
//
|
|
// 1. We read the current version (0 if missing = fresh DB or pre-migration).
|
|
// 2. We iterate forward, running each migration[k→k+1] in order, bumping
|
|
// the stored version after each successful step.
|
|
// 3. If CurrentSchemaVersion is already reached, zero migrations run, the
|
|
// call is ~1 µs (single KV read).
|
|
//
|
|
// Design principles
|
|
// ────────────────
|
|
// • Idempotent: a crashed migration can be re-run from scratch. Every
|
|
// migration either completes its write AND updates the version in the
|
|
// SAME transaction, or neither.
|
|
// • Forward-only: downgrade is not supported. If an operator needs to
|
|
// roll back the binary, they restore from a pre-upgrade backup. The
|
|
// `update.sh` operator script checkpoints before restart for this.
|
|
// • Tiny: the migration registry is a plain Go slice, not a framework.
|
|
// Each migration is ~20 lines. Adding one is purely additive.
|
|
//
|
|
// As of this commit there are ZERO migrations (CurrentSchemaVersion = 0).
|
|
// The scaffolding ships empty so the very first real migration — whenever
|
|
// it lands — has a home that all deployed nodes already understand.
|
|
package blockchain
|
|
|
|
import (
|
|
"encoding/binary"
|
|
"fmt"
|
|
"log"
|
|
|
|
badger "github.com/dgraph-io/badger/v4"
|
|
)
|
|
|
|
const (
	// schemaMetaKey is the single BadgerDB key that stores this DB's current
	// schema version. Not prefixed like other keys — it's a bootstrap marker
	// read before any prefixed query, so conflicts with userland prefixes
	// are impossible by construction.
	// The stored value is a 4-byte big-endian uint32 (see writeSchemaVersion).
	schemaMetaKey = "schema:ver"

	// CurrentSchemaVersion is the layout this binary writes. Bumped in lockstep
	// with every migration added below. A fresh DB is written at this version
	// directly (no migration chain to run).
	CurrentSchemaVersion uint32 = 0
)
|
|
|
|
// migration represents a single step from version v to v+1.
// Apply runs inside a single badger.Update — if it returns error, nothing
// is written, and the migration can be safely retried.
type migration struct {
	From        uint32                      // version the data is at before this step runs
	To          uint32                      // version after Apply; runMigrations enforces To == From+1
	Description string                      // human-readable summary, logged before the step executes
	Apply       func(txn *badger.Txn) error // data rewrite; runs in the same txn as the version bump
}
|
|
|
|
// migrations is the ordered forward-migration registry. runMigrations walks
// this slice in order, so entries must be appended in ascending From order
// with no gaps.
//
// To add a migration:
//
// 1. Bump CurrentSchemaVersion above.
// 2. Append an entry here with From = previous, To = new.
// 3. In Apply, walk the relevant prefixes and rewrite keys/values.
// 4. Add a unit test in schema_migrations_test.go seeding a vN-1 DB
//    and asserting the vN invariants after one NewChain open.
//
// The slice is intentionally empty right now: the scaffold ships first,
// migrations land per-feature as needed.
var migrations = []migration{
	// no migrations yet
}
|
|
|
|
// readSchemaVersion returns the version stored at schemaMetaKey, or 0 if the
|
|
// key is absent (interpretation: "pre-migration DB / fresh DB treat as v0").
|
|
func readSchemaVersion(db *badger.DB) (uint32, error) {
|
|
var v uint32
|
|
err := db.View(func(txn *badger.Txn) error {
|
|
item, err := txn.Get([]byte(schemaMetaKey))
|
|
if err == badger.ErrKeyNotFound {
|
|
v = 0
|
|
return nil
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return item.Value(func(val []byte) error {
|
|
if len(val) != 4 {
|
|
return fmt.Errorf("schema version has unexpected length %d (want 4)", len(val))
|
|
}
|
|
v = binary.BigEndian.Uint32(val)
|
|
return nil
|
|
})
|
|
})
|
|
return v, err
|
|
}
|
|
|
|
// writeSchemaVersion persists the given version under schemaMetaKey. Usually
|
|
// called inside the same txn that applied the corresponding migration, so
|
|
// version bump + data rewrite are atomic. runMigrations handles that.
|
|
func writeSchemaVersion(txn *badger.Txn, v uint32) error {
|
|
var buf [4]byte
|
|
binary.BigEndian.PutUint32(buf[:], v)
|
|
return txn.Set([]byte(schemaMetaKey), buf[:])
|
|
}
|
|
|
|
// runMigrations applies every registered migration forward from the stored
|
|
// version to CurrentSchemaVersion. Called by NewChain after badger.Open.
|
|
//
|
|
// Behavior:
|
|
// - stored == target → no-op, returns nil
|
|
// - stored < target → runs each migration[k→k+1] in sequence; if ANY
|
|
// returns error, the DB is left at the last successful version and the
|
|
// error is returned (no partial-migration corruption).
|
|
// - stored > target → FATAL: operator is running an older binary on a
|
|
// newer DB. Refuse to open rather than silently mis-interpret data.
|
|
func runMigrations(db *badger.DB) error {
|
|
cur, err := readSchemaVersion(db)
|
|
if err != nil {
|
|
return fmt.Errorf("read schema version: %w", err)
|
|
}
|
|
if cur == CurrentSchemaVersion {
|
|
return nil
|
|
}
|
|
if cur > CurrentSchemaVersion {
|
|
return fmt.Errorf(
|
|
"chain DB is at schema v%d but this binary only understands v%d — "+
|
|
"run a newer binary OR restore from a pre-upgrade backup",
|
|
cur, CurrentSchemaVersion)
|
|
}
|
|
|
|
log.Printf("[CHAIN] migrating schema v%d → v%d (%d steps)",
|
|
cur, CurrentSchemaVersion, CurrentSchemaVersion-cur)
|
|
|
|
for _, m := range migrations {
|
|
if m.From < cur {
|
|
continue
|
|
}
|
|
if m.From != cur {
|
|
return fmt.Errorf("migration gap: stored=v%d, next migration expects v%d",
|
|
cur, m.From)
|
|
}
|
|
if m.To != m.From+1 {
|
|
return fmt.Errorf("migration %d→%d is not a single step", m.From, m.To)
|
|
}
|
|
log.Printf("[CHAIN] migration v%d→v%d: %s", m.From, m.To, m.Description)
|
|
err := db.Update(func(txn *badger.Txn) error {
|
|
if err := m.Apply(txn); err != nil {
|
|
return err
|
|
}
|
|
return writeSchemaVersion(txn, m.To)
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("migration v%d→v%d failed: %w", m.From, m.To, err)
|
|
}
|
|
cur = m.To
|
|
}
|
|
|
|
// Fresh DB with no migrations yet to run — stamp the current version so
|
|
// we don't re-read "0 = no key" forever on later opens.
|
|
if cur < CurrentSchemaVersion {
|
|
err := db.Update(func(txn *badger.Txn) error {
|
|
return writeSchemaVersion(txn, CurrentSchemaVersion)
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("stamp schema version %d: %w", CurrentSchemaVersion, err)
|
|
}
|
|
}
|
|
|
|
// On a brand-new DB (no chain yet) cur is still 0 but
|
|
// CurrentSchemaVersion is also 0 (today), so nothing to stamp. When the
|
|
// first real migration lands, this stamp becomes active.
|
|
if CurrentSchemaVersion == 0 && cur == 0 {
|
|
err := db.Update(func(txn *badger.Txn) error {
|
|
// Only stamp if the key is absent — otherwise we already wrote it
|
|
// in the loop above.
|
|
if _, getErr := txn.Get([]byte(schemaMetaKey)); getErr == badger.ErrKeyNotFound {
|
|
return writeSchemaVersion(txn, CurrentSchemaVersion)
|
|
}
|
|
return nil
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("stamp initial schema version 0: %w", err)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|