chore: initial commit for v0.0.1
DChain single-node blockchain + React Native messenger client. Core: - PBFT consensus with multi-sig validator admission + equivocation slashing - BadgerDB + schema migration scaffold (CurrentSchemaVersion=0) - libp2p gossipsub (tx/v1, blocks/v1, relay/v1, version/v1) - Native Go contracts (username_registry) alongside WASM (wazero) - WebSocket gateway with topic-based fanout + Ed25519-nonce auth - Relay mailbox with NaCl envelope encryption (X25519 + Ed25519) - Prometheus /metrics, per-IP rate limit, body-size cap Deployment: - Single-node compose (deploy/single/) with Caddy TLS + optional Prometheus - 3-node dev compose (docker-compose.yml) with mocked internet topology - 3-validator prod compose (deploy/prod/) for federation - Auto-update from Gitea via /api/update-check + systemd timer - Build-time version injection (ldflags → node --version) - UI / Swagger toggle flags (DCHAIN_DISABLE_UI, DCHAIN_DISABLE_SWAGGER) Client (client-app/): - Expo / React Native / NativeWind - E2E NaCl encryption, typing indicator, contact requests - Auto-discovery of canonical contracts, chain_id aware, WS reconnect on node switch Documentation: - README.md, CHANGELOG.md, CONTEXT.md - deploy/single/README.md with 6 operator scenarios - deploy/UPDATE_STRATEGY.md with 4-layer forward-compat design - docs/contracts/*.md per contract
This commit is contained in:
94
client-app/hooks/useBalance.ts
Normal file
94
client-app/hooks/useBalance.ts
Normal file
@@ -0,0 +1,94 @@
|
||||
/**
|
||||
* Balance hook — uses the WebSocket gateway to receive instant updates when
|
||||
* a tx involving the current address is committed, with HTTP polling as a
|
||||
* graceful fallback for old nodes that don't expose /api/ws.
|
||||
*
|
||||
* Flow:
|
||||
* 1. On mount: immediate HTTP fetch so the UI has a non-zero balance ASAP
|
||||
* 2. Subscribe to `addr:<my_pubkey>` on the WS hub
|
||||
* 3. On every `tx` event, re-fetch balance (cheap — one Badger read server-side)
|
||||
* 4. If WS disconnects for >15s, fall back to 10-second polling until it reconnects
|
||||
*/
|
||||
|
||||
import { useEffect, useCallback, useRef } from 'react';
|
||||
import { getBalance } from '@/lib/api';
|
||||
import { getWSClient } from '@/lib/ws';
|
||||
import { useStore } from '@/lib/store';
|
||||
|
||||
const FALLBACK_POLL_INTERVAL = 10_000; // HTTP poll when WS is down
|
||||
const WS_GRACE_BEFORE_POLLING = 15_000; // don't start polling immediately on disconnect
|
||||
|
||||
export function useBalance() {
|
||||
const keyFile = useStore(s => s.keyFile);
|
||||
const setBalance = useStore(s => s.setBalance);
|
||||
|
||||
const refresh = useCallback(async () => {
|
||||
if (!keyFile) return;
|
||||
try {
|
||||
const bal = await getBalance(keyFile.pub_key);
|
||||
setBalance(bal);
|
||||
} catch {
|
||||
// transient — next call will retry
|
||||
}
|
||||
}, [keyFile, setBalance]);
|
||||
|
||||
// --- fallback polling management ---
|
||||
const pollTimerRef = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||
const disconnectSinceRef = useRef<number | null>(null);
|
||||
const disconnectTORef = useRef<ReturnType<typeof setTimeout> | null>(null);
|
||||
|
||||
const startPolling = useCallback(() => {
|
||||
if (pollTimerRef.current) return;
|
||||
console.log('[useBalance] WS down for grace period — starting HTTP poll');
|
||||
refresh();
|
||||
pollTimerRef.current = setInterval(refresh, FALLBACK_POLL_INTERVAL);
|
||||
}, [refresh]);
|
||||
|
||||
const stopPolling = useCallback(() => {
|
||||
if (pollTimerRef.current) {
|
||||
clearInterval(pollTimerRef.current);
|
||||
pollTimerRef.current = null;
|
||||
}
|
||||
if (disconnectTORef.current) {
|
||||
clearTimeout(disconnectTORef.current);
|
||||
disconnectTORef.current = null;
|
||||
}
|
||||
disconnectSinceRef.current = null;
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
if (!keyFile) return;
|
||||
const ws = getWSClient();
|
||||
|
||||
// Immediate HTTP fetch so the UI is not empty while the WS hello arrives.
|
||||
refresh();
|
||||
|
||||
// Refresh balance whenever a tx for our address is committed.
|
||||
const offTx = ws.subscribe('addr:' + keyFile.pub_key, (frame) => {
|
||||
if (frame.event === 'tx') {
|
||||
refresh();
|
||||
}
|
||||
});
|
||||
|
||||
// Manage fallback polling based on WS connection state.
|
||||
const offConn = ws.onConnectionChange((ok) => {
|
||||
if (ok) {
|
||||
stopPolling();
|
||||
refresh(); // catch up anything we missed while disconnected
|
||||
} else if (disconnectTORef.current === null) {
|
||||
disconnectSinceRef.current = Date.now();
|
||||
disconnectTORef.current = setTimeout(startPolling, WS_GRACE_BEFORE_POLLING);
|
||||
}
|
||||
});
|
||||
|
||||
ws.connect();
|
||||
|
||||
return () => {
|
||||
offTx();
|
||||
offConn();
|
||||
stopPolling();
|
||||
};
|
||||
}, [keyFile, refresh, startPolling, stopPolling]);
|
||||
|
||||
return { refresh };
|
||||
}
|
||||
80
client-app/hooks/useContacts.ts
Normal file
80
client-app/hooks/useContacts.ts
Normal file
@@ -0,0 +1,80 @@
|
||||
/**
|
||||
* Contacts + inbound request tracking.
|
||||
*
|
||||
* - Loads cached contacts from local storage on boot.
|
||||
* - Subscribes to the address WS topic so a new CONTACT_REQUEST pulls the
|
||||
* relay contact list immediately (sub-second UX).
|
||||
* - Keeps a 30 s polling fallback for nodes without WS or while disconnected.
|
||||
*/
|
||||
|
||||
import { useEffect, useCallback } from 'react';
|
||||
import { fetchContactRequests } from '@/lib/api';
|
||||
import { getWSClient } from '@/lib/ws';
|
||||
import { loadContacts } from '@/lib/storage';
|
||||
import { useStore } from '@/lib/store';
|
||||
|
||||
const FALLBACK_POLL_INTERVAL = 30_000;
|
||||
|
||||
export function useContacts() {
|
||||
const keyFile = useStore(s => s.keyFile);
|
||||
const setContacts = useStore(s => s.setContacts);
|
||||
const setRequests = useStore(s => s.setRequests);
|
||||
const contacts = useStore(s => s.contacts);
|
||||
|
||||
// Load cached contacts from local storage once
|
||||
useEffect(() => {
|
||||
loadContacts().then(setContacts);
|
||||
}, [setContacts]);
|
||||
|
||||
const pollRequests = useCallback(async () => {
|
||||
if (!keyFile) return;
|
||||
try {
|
||||
const raw = await fetchContactRequests(keyFile.pub_key);
|
||||
|
||||
// Filter out already-accepted contacts
|
||||
const contactAddresses = new Set(contacts.map(c => c.address));
|
||||
|
||||
const requests = raw
|
||||
.filter(r => r.status === 'pending' && !contactAddresses.has(r.requester_pub))
|
||||
.map(r => ({
|
||||
from: r.requester_pub,
|
||||
// x25519Pub will be fetched from identity when user taps Accept
|
||||
x25519Pub: '',
|
||||
intro: r.intro ?? '',
|
||||
timestamp: r.created_at,
|
||||
txHash: r.tx_id,
|
||||
}));
|
||||
|
||||
setRequests(requests);
|
||||
} catch {
|
||||
// Ignore transient network errors
|
||||
}
|
||||
}, [keyFile, contacts, setRequests]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!keyFile) return;
|
||||
const ws = getWSClient();
|
||||
|
||||
// Initial load + low-frequency fallback poll (covers missed WS events,
|
||||
// works even when the node has no WS endpoint).
|
||||
pollRequests();
|
||||
const interval = setInterval(pollRequests, FALLBACK_POLL_INTERVAL);
|
||||
|
||||
// Immediate refresh when a CONTACT_REQUEST / ACCEPT_CONTACT tx addressed
|
||||
// to us lands on-chain. WS fan-out already filters to our address topic.
|
||||
const off = ws.subscribe('addr:' + keyFile.pub_key, (frame) => {
|
||||
if (frame.event === 'tx') {
|
||||
const d = frame.data as { tx_type?: string } | undefined;
|
||||
if (d?.tx_type === 'CONTACT_REQUEST' || d?.tx_type === 'ACCEPT_CONTACT') {
|
||||
pollRequests();
|
||||
}
|
||||
}
|
||||
});
|
||||
ws.connect();
|
||||
|
||||
return () => {
|
||||
clearInterval(interval);
|
||||
off();
|
||||
};
|
||||
}, [keyFile, pollRequests]);
|
||||
}
|
||||
123
client-app/hooks/useMessages.ts
Normal file
123
client-app/hooks/useMessages.ts
Normal file
@@ -0,0 +1,123 @@
|
||||
/**
|
||||
* Subscribe to the relay inbox via WebSocket and decrypt incoming envelopes
|
||||
* for the active chat. Falls back to 30-second polling whenever the WS is
|
||||
* not connected — preserves correctness on older nodes or flaky networks.
|
||||
*
|
||||
* Flow:
|
||||
* 1. On mount: one HTTP fetch so we have whatever is already in the inbox
|
||||
* 2. Subscribe to topic `inbox:<my_x25519>` — the node pushes a summary
|
||||
* for each fresh envelope as soon as mailbox.Store() succeeds
|
||||
* 3. On each push, pull the full envelope list (cheap — bounded by
|
||||
* MailboxPerRecipientCap) and decrypt anything we haven't seen yet
|
||||
* 4. If WS disconnects for > 15 seconds, start a 30 s HTTP poll until it
|
||||
* reconnects
|
||||
*/
|
||||
|
||||
import { useEffect, useCallback, useRef } from 'react';
|
||||
import { fetchInbox } from '@/lib/api';
|
||||
import { getWSClient } from '@/lib/ws';
|
||||
import { decryptMessage } from '@/lib/crypto';
|
||||
import { appendMessage } from '@/lib/storage';
|
||||
import { useStore } from '@/lib/store';
|
||||
|
||||
const FALLBACK_POLL_INTERVAL = 30_000; // HTTP poll when WS is down
|
||||
const WS_GRACE_BEFORE_POLLING = 15_000; // don't start polling immediately on disconnect
|
||||
|
||||
export function useMessages(contactX25519: string) {
|
||||
const keyFile = useStore(s => s.keyFile);
|
||||
const appendMsg = useStore(s => s.appendMessage);
|
||||
|
||||
const pullAndDecrypt = useCallback(async () => {
|
||||
if (!keyFile || !contactX25519) return;
|
||||
try {
|
||||
const envelopes = await fetchInbox(keyFile.x25519_pub);
|
||||
for (const env of envelopes) {
|
||||
// Only process messages from this contact
|
||||
if (env.sender_pub !== contactX25519) continue;
|
||||
|
||||
const text = decryptMessage(
|
||||
env.ciphertext,
|
||||
env.nonce,
|
||||
env.sender_pub,
|
||||
keyFile.x25519_priv,
|
||||
);
|
||||
if (!text) continue;
|
||||
|
||||
const msg = {
|
||||
id: `${env.sender_pub}_${env.timestamp}_${env.nonce.slice(0, 8)}`,
|
||||
from: env.sender_pub,
|
||||
text,
|
||||
timestamp: env.timestamp,
|
||||
mine: false,
|
||||
};
|
||||
appendMsg(contactX25519, msg);
|
||||
await appendMessage(contactX25519, msg);
|
||||
}
|
||||
} catch (e) {
|
||||
// Don't surface inbox errors aggressively — next event or poll retries
|
||||
console.warn('[useMessages] pull error:', e);
|
||||
}
|
||||
}, [keyFile, contactX25519, appendMsg]);
|
||||
|
||||
// ── Fallback polling state ────────────────────────────────────────────
|
||||
const pollTimerRef = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||
const disconnectTORef = useRef<ReturnType<typeof setTimeout> | null>(null);
|
||||
|
||||
const startPolling = useCallback(() => {
|
||||
if (pollTimerRef.current) return;
|
||||
console.log('[useMessages] WS down — starting HTTP poll fallback');
|
||||
pullAndDecrypt();
|
||||
pollTimerRef.current = setInterval(pullAndDecrypt, FALLBACK_POLL_INTERVAL);
|
||||
}, [pullAndDecrypt]);
|
||||
|
||||
const stopPolling = useCallback(() => {
|
||||
if (pollTimerRef.current) {
|
||||
clearInterval(pollTimerRef.current);
|
||||
pollTimerRef.current = null;
|
||||
}
|
||||
if (disconnectTORef.current) {
|
||||
clearTimeout(disconnectTORef.current);
|
||||
disconnectTORef.current = null;
|
||||
}
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
if (!keyFile || !contactX25519) return;
|
||||
|
||||
const ws = getWSClient();
|
||||
|
||||
// Initial fetch — populate whatever landed before we mounted.
|
||||
pullAndDecrypt();
|
||||
|
||||
// Subscribe to our x25519 inbox — the node emits on mailbox.Store.
|
||||
// Topic filter: only envelopes for ME; we then filter by sender inside
|
||||
// the handler so we only render messages in THIS chat.
|
||||
const offInbox = ws.subscribe('inbox:' + keyFile.x25519_pub, (frame) => {
|
||||
if (frame.event !== 'inbox') return;
|
||||
const d = frame.data as { sender_pub?: string } | undefined;
|
||||
// Optimisation: if the envelope is from a different peer, skip the
|
||||
// whole refetch — we'd just drop it in the sender filter below anyway.
|
||||
if (d?.sender_pub && d.sender_pub !== contactX25519) return;
|
||||
pullAndDecrypt();
|
||||
});
|
||||
|
||||
// Manage fallback polling based on WS connection state.
|
||||
const offConn = ws.onConnectionChange((ok) => {
|
||||
if (ok) {
|
||||
stopPolling();
|
||||
// Catch up anything we missed while disconnected.
|
||||
pullAndDecrypt();
|
||||
} else if (disconnectTORef.current === null) {
|
||||
disconnectTORef.current = setTimeout(startPolling, WS_GRACE_BEFORE_POLLING);
|
||||
}
|
||||
});
|
||||
|
||||
ws.connect();
|
||||
|
||||
return () => {
|
||||
offInbox();
|
||||
offConn();
|
||||
stopPolling();
|
||||
};
|
||||
}, [keyFile, contactX25519, pullAndDecrypt, startPolling, stopPolling]);
|
||||
}
|
||||
61
client-app/hooks/useWellKnownContracts.ts
Normal file
61
client-app/hooks/useWellKnownContracts.ts
Normal file
@@ -0,0 +1,61 @@
|
||||
/**
 * Auto-discover canonical system contracts from the node so the user doesn't
 * have to paste contract IDs into settings by hand.
 *
 * Flow:
 * 1. On app boot (and whenever nodeUrl changes), call GET /api/well-known-contracts
 * 2. If the node advertises a `username_registry`, sync `settings.contractId`
 *    to the canonical ID it reports — including overwriting a manually set
 *    value, so a stale cached ID never outlives a chain reset or migration.
 * 3. A manually pasted ID therefore only holds until the next sync; operators
 *    who need a hard override must pin it outside the app (patched node or
 *    wrapper config).
 */
|
||||
|
||||
import { useEffect } from 'react';
|
||||
import { fetchWellKnownContracts } from '@/lib/api';
|
||||
import { saveSettings } from '@/lib/storage';
|
||||
import { useStore } from '@/lib/store';
|
||||
|
||||
export function useWellKnownContracts() {
|
||||
const nodeUrl = useStore(s => s.settings.nodeUrl);
|
||||
const contractId = useStore(s => s.settings.contractId);
|
||||
const settings = useStore(s => s.settings);
|
||||
const setSettings = useStore(s => s.setSettings);
|
||||
|
||||
useEffect(() => {
|
||||
let cancelled = false;
|
||||
|
||||
async function run() {
|
||||
if (!nodeUrl) return;
|
||||
const res = await fetchWellKnownContracts();
|
||||
if (cancelled || !res) return;
|
||||
|
||||
const registry = res.contracts['username_registry'];
|
||||
if (!registry) return;
|
||||
|
||||
// Always keep the stored contractId in sync with what the node reports
|
||||
// as canonical. If the user resets their chain or we migrate from a
|
||||
// WASM contract to the native one, the stale contract_id cached in
|
||||
// local storage would otherwise keep the client trying to call a
|
||||
// contract that no longer exists on this chain.
|
||||
//
|
||||
// To still support intentional overrides: the UI's "advanced" section
|
||||
// allows pasting a specific ID — and since that also writes to
|
||||
// settings.contractId, the loop converges back to whatever the node
|
||||
// says after a short delay. Operators who want a hard override should
|
||||
// either run a patched node or pin the value with a wrapper config
|
||||
// outside the app.
|
||||
if (registry.contract_id !== contractId) {
|
||||
const next = { ...settings, contractId: registry.contract_id };
|
||||
setSettings({ contractId: registry.contract_id });
|
||||
await saveSettings(next);
|
||||
console.log('[well-known] synced username_registry =', registry.contract_id,
|
||||
'(was:', contractId || '<empty>', ')');
|
||||
}
|
||||
}
|
||||
|
||||
run();
|
||||
return () => { cancelled = true; };
|
||||
// Re-run when the node URL changes (user switched networks) or when
|
||||
// contractId is cleared.
|
||||
}, [nodeUrl, contractId]); // eslint-disable-line react-hooks/exhaustive-deps
|
||||
}
|
||||
Reference in New Issue
Block a user