# DChain: single-node blockchain + React Native messenger client.
# Core: PBFT consensus (multi-sig validator admission + equivocation slashing);
# BadgerDB with schema-migration scaffold; libp2p gossipsub topics
# (tx/v1, blocks/v1, relay/v1, version/v1); native Go contracts alongside
# WASM (wazero); WebSocket gateway with Ed25519-nonce auth; relay mailbox
# with NaCl envelope encryption (X25519 + Ed25519); Prometheus /metrics,
# per-IP rate limit, body-size cap.
# Docs: README.md, CHANGELOG.md, deploy/UPDATE_STRATEGY.md, docs/contracts/*.md
name: dchain-prod

# ══════════════════════════════════════════════════════════════════════════
# DChain production stack.
#
# Layout:
#   - 3 validator nodes, each with its own persistent volume and key file
#   - Caddy reverse proxy on the edge: auto-HTTPS from Let's Encrypt,
#     rewrites ws upgrades, round-robins /api/* across nodes
#   - Prometheus + Grafana for observability (optional, profile=monitor)
#
# Quick start (1-host single-server):
#   cp node.env.example node1.env             # edit domain / pubkeys
#   cp node.env.example node2.env
#   cp node.env.example node3.env
#   docker compose up -d                      # runs nodes + Caddy
#   docker compose --profile monitor up -d    # adds Prometheus + Grafana
#
# For multi-host (the realistic case), copy this file per VPS and remove
# the two nodes that aren't yours; Caddy can still live on one of them or
# on a dedicated edge box. Operators are expected to edit this file —
# it's a reference, not a magic turnkey.
#
# Key files:
#   ./keys/node{1,2,3}.json — Ed25519 identity, bake in via bind mount
#   ./caddy/Caddyfile       — auto-HTTPS config
#   ./node.env.example      — ENV template
#   ./prometheus.yml        — scrape config
# ══════════════════════════════════════════════════════════════════════════
# Single bridge network shared by every service in this stack.
networks:
  internet:
    name: dchain_internet
    driver: bridge

# Named volumes: one data volume per validator, plus Caddy and monitoring state.
volumes:
  node1_data:
  node2_data:
  node3_data:
  caddy_data:
  caddy_config:
  prom_data:
  grafana_data:
# Shared template for the three validator services (merged via `<<: *node-base`).
x-node-base: &node-base
  build:
    context: ../..
    dockerfile: deploy/prod/Dockerfile.slim
  restart: unless-stopped
  networks: [internet]
  # Drop all Linux capabilities — the node binary needs none.
  cap_drop: [ALL]
  # Read-only root FS; only /data is writable (volume-mounted).
  read_only: true
  tmpfs: [/tmp]
  # Quoted: the value contains ':' in a flow sequence and must stay one string.
  security_opt: ["no-new-privileges:true"]
  # Health check hits /api/netstats through the local HTTP server.
  # NOTE(review): assumes wget is present in the slim image — confirm.
  healthcheck:
    test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:8080/api/netstats >/dev/null || exit 1"]
    interval: 10s
    timeout: 3s
    retries: 6
    start_period: 15s
services:
  # Bootstrap validator: runs genesis on first boot; node2/node3 join via it.
  node1:
    <<: *node-base
    container_name: dchain_node1
    hostname: node1
    env_file: ./node1.env
    volumes:
      - node1_data:/data
      - ./keys/node1.json:/keys/node.json:ro
    command:
      - "--genesis"    # drop --genesis after first boot
      - "--db=/data/chain"
      - "--mailbox-db=/data/mailbox"
      - "--key=/keys/node.json"
      - "--relay-key=/data/relay.json"
      - "--listen=/ip4/0.0.0.0/tcp/4001"
      - "--stats-addr=:8080"
      - "--heartbeat=true"
      - "--register-relay"
node2:
|
|
<<: *node-base
|
|
container_name: dchain_node2
|
|
hostname: node2
|
|
env_file: ./node2.env
|
|
depends_on:
|
|
node1: { condition: service_healthy }
|
|
volumes:
|
|
- node2_data:/data
|
|
- ./keys/node2.json:/keys/node.json:ro
|
|
command:
|
|
- "--db=/data/chain"
|
|
- "--mailbox-db=/data/mailbox"
|
|
- "--key=/keys/node.json"
|
|
- "--relay-key=/data/relay.json"
|
|
- "--listen=/ip4/0.0.0.0/tcp/4001"
|
|
- "--stats-addr=:8080"
|
|
- "--join=http://node1:8080" # bootstrap from node1
|
|
- "--register-relay"
|
|
|
|
node3:
|
|
<<: *node-base
|
|
container_name: dchain_node3
|
|
hostname: node3
|
|
env_file: ./node3.env
|
|
depends_on:
|
|
node1: { condition: service_healthy }
|
|
volumes:
|
|
- node3_data:/data
|
|
- ./keys/node3.json:/keys/node.json:ro
|
|
command:
|
|
- "--db=/data/chain"
|
|
- "--mailbox-db=/data/mailbox"
|
|
- "--key=/keys/node.json"
|
|
- "--relay-key=/data/relay.json"
|
|
- "--listen=/ip4/0.0.0.0/tcp/4001"
|
|
- "--stats-addr=:8080"
|
|
- "--join=http://node1:8080"
|
|
- "--register-relay"
|
|
|
|
# ── Edge: Caddy with auto-HTTPS + WS upgrade + load-balancing ────────────
|
|
caddy:
|
|
image: caddy:2.8-alpine
|
|
container_name: dchain_caddy
|
|
restart: unless-stopped
|
|
networks: [internet]
|
|
ports:
|
|
- "80:80"
|
|
- "443:443"
|
|
- "443:443/udp" # HTTP/3 / QUIC
|
|
volumes:
|
|
- ./caddy/Caddyfile:/etc/caddy/Caddyfile:ro
|
|
- caddy_data:/data
|
|
- caddy_config:/config
|
|
depends_on:
|
|
node1: { condition: service_healthy }
|
|
|
|
# ── Observability ────────────────────────────────────────────────────────
|
|
# Start these only when needed: `docker compose --profile monitor up -d`
|
|
|
|
prometheus:
|
|
profiles: [monitor]
|
|
image: prom/prometheus:v2.53.0
|
|
container_name: dchain_prometheus
|
|
restart: unless-stopped
|
|
networks: [internet]
|
|
volumes:
|
|
- ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
|
- prom_data:/prometheus
|
|
command:
|
|
- "--config.file=/etc/prometheus/prometheus.yml"
|
|
- "--storage.tsdb.retention.time=30d"
|
|
# No external port — exposed only to Grafana via internal network.
|
|
|
|
grafana:
|
|
profiles: [monitor]
|
|
image: grafana/grafana:11.1.0
|
|
container_name: dchain_grafana
|
|
restart: unless-stopped
|
|
networks: [internet]
|
|
ports:
|
|
- "3000:3000"
|
|
depends_on: [prometheus]
|
|
environment:
|
|
GF_SECURITY_ADMIN_USER: admin
|
|
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PW:-change-me}
|
|
GF_USERS_ALLOW_SIGN_UP: "false"
|
|
volumes:
|
|
- grafana_data:/var/lib/grafana
|
|
- ./grafana/datasources:/etc/grafana/provisioning/datasources:ro
|
|
- ./grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
|