Skill: Phase 2.B Bridge Safety & Idempotency
Scope: Backend relayer logic in services/, data model in migrations, Python/FastAPI code
When to activate: Phase 2.B implementation, deposit/withdrawal processing, merkle batch operations
Phase 2.B Stack Overview
deposits table (MySQL/Postgres) ← Event indexer reads here
↓
Batcher (pure function: SQL → merkle root)
↓
CronosSettlementAnchor (stores merkle root)
↓ (batch committed, now fetch merkle proofs for each withdrawal)
↓
CronosSigilLockbox (verify proofs + unlock SIGIL)
Critical Rule: Each step must be idempotent. Replaying the same operation twice must produce the same result and not double-spend.
Data Model Requirements
Table: deposits
CREATE TABLE deposits (
    id            SERIAL PRIMARY KEY,
    chain_id      INT NOT NULL,            -- Which chain (e.g., Cronos testnet = 338)
    tx_hash       VARCHAR(66) NOT NULL,    -- Source tx hash
    log_index     INT NOT NULL,            -- Log index in tx (unique per tx)
    depositor     VARCHAR(42) NOT NULL,    -- User address
    amount        DECIMAL(36,18) NOT NULL, -- Deposited amount (SIGIL)
    recipient     VARCHAR(42) NOT NULL,    -- Execution chain recipient
    block_number  BIGINT NOT NULL,         -- Confirmations counted from here
    confirmed_at  TIMESTAMP,               -- When confirmations reached 5+
    minted_at     TIMESTAMP,               -- When SIGIL minted (proof verified)
    status        ENUM('DETECTED', 'CONFIRMED', 'MINTED', 'FAILED') DEFAULT 'DETECTED',
    error_reason  TEXT,                    -- If minting failed
    UNIQUE(chain_id, tx_hash, log_index),  -- ← CRITICAL: Prevents duplicate ingestion
    INDEX(status),
    INDEX(confirmed_at)
);
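The relayer snippets below query this table through an ORM model named Deposits. A minimal SQLAlchemy sketch of that model, with class and column names assumed to match this schema (the real model lives in services/), could look like:

from sqlalchemy import (BigInteger, Column, Integer, Numeric, String, Text,
                        TIMESTAMP, UniqueConstraint)
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Deposits(Base):
    """Sketch only: mirrors the deposits DDL above."""
    __tablename__ = 'deposits'
    id = Column(Integer, primary_key=True)
    chain_id = Column(Integer, nullable=False)
    tx_hash = Column(String(66), nullable=False)
    log_index = Column(Integer, nullable=False)
    depositor = Column(String(42), nullable=False)
    amount = Column(Numeric(36, 18), nullable=False)
    recipient = Column(String(42), nullable=False)
    block_number = Column(BigInteger, nullable=False)
    confirmed_at = Column(TIMESTAMP)
    minted_at = Column(TIMESTAMP)
    status = Column(String(16), nullable=False, default='DETECTED')
    error_reason = Column(Text)
    __table_args__ = (UniqueConstraint('chain_id', 'tx_hash', 'log_index'),)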
Idempotency Pattern:
# Querying deposits that are at least 5 blocks deep and not yet confirmed
current_block = w3.eth.block_number
deposits = db.query(Deposits).filter(
    Deposits.status == 'DETECTED',
    Deposits.confirmed_at == None,
    Deposits.block_number <= current_block - 5  # Confirmation depth
).limit(10).all()

# Processing (IDEMPOTENT)
for dep in deposits:
    # Before changing status, verify prerequisites
    if dep.status != 'DETECTED':
        continue  # Skip already processed
    try:
        # Mint SIGIL
        mint_tx = bridge_service.mint_deposit(dep)
        dep.status = 'MINTED'
        dep.minted_at = datetime.now()
    except Exception as e:
        dep.error_reason = str(e)
        dep.status = 'FAILED'  # Mark explicitly, don't retry
db.commit()  # Atomic: all or nothing
Table: withdrawals
CREATE TABLE withdrawals (
    id            SERIAL PRIMARY KEY,
    chain_id      INT NOT NULL,            -- Source chain (execution layer)
    tx_hash       VARCHAR(66) NOT NULL,    -- Burn tx hash
    log_index     INT NOT NULL,            -- Log index in tx
    withdrawer    VARCHAR(42) NOT NULL,    -- User address
    amount        DECIMAL(36,18) NOT NULL, -- Withdrawal amount
    recipient     VARCHAR(42) NOT NULL,    -- Cronos recipient (where SIGIL unlocked)
    block_number  BIGINT NOT NULL,         -- Confirmations counted from here
    confirmed_at  TIMESTAMP,               -- When confirmations reached 5
    batched_at    TIMESTAMP,               -- When included in batch
    executed_at   TIMESTAMP,               -- When proof verified + unlocked
    status        ENUM('DETECTED', 'CONFIRMED', 'BATCHED', 'EXECUTED', 'FAILED') DEFAULT 'DETECTED',
    error_reason  TEXT,
    batch_id      BIGINT,                  -- Which batch contains this withdrawal
    merkle_proof  JSON,                    -- Merkle proof (cached for execution)
    UNIQUE(chain_id, tx_hash, log_index),  -- ← CRITICAL: Prevents double-processing
    INDEX(status),
    INDEX(batch_id),
    FOREIGN KEY(batch_id) REFERENCES batches(id)
);
Table: batches
CREATE TABLE batches (
    id            BIGINT PRIMARY KEY,      -- Sequential batch ID (1, 2, 3, ...)
    merkle_root   VARCHAR(66) NOT NULL,
    total_amount  DECIMAL(36,18) NOT NULL, -- Sum of all withdrawals
    leaf_count    INT NOT NULL,            -- Number of withdrawals in batch
    created_at    TIMESTAMP,               -- When batch was computed
    anchored_at   TIMESTAMP,               -- When merkle root submitted to chain
    finalized_at  TIMESTAMP,               -- When all proofs executed
    status        ENUM('CREATED', 'ANCHORED', 'FINALIZED') DEFAULT 'CREATED',
    UNIQUE(merkle_root),                   -- ← CRITICAL: Prevents duplicate roots
    INDEX(status)
);
Table: batch_leaves
CREATE TABLE batch_leaves (
    id             SERIAL PRIMARY KEY,
    batch_id       BIGINT NOT NULL,        -- Which batch
    withdrawal_id  BIGINT NOT NULL,        -- Which withdrawal
    leaf_index     INT NOT NULL,           -- Position in merkle tree (0-indexed)
    leaf_hash      VARCHAR(66) NOT NULL,   -- keccak256(withdrawal_id, recipient, amount)
    UNIQUE(batch_id, withdrawal_id),       -- Each withdrawal in only 1 batch
    UNIQUE(batch_id, leaf_index),          -- Each position filled once
    FOREIGN KEY(batch_id) REFERENCES batches(id),
    FOREIGN KEY(withdrawal_id) REFERENCES withdrawals(id)
);
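The leaf_hash column mirrors the on-chain leaf encoding. A sketch of that hash in Python, assuming Solidity-style abi.encodePacked(uint256, address, uint256) and eth-abi v4 / eth-utils; the exact field order and types must match merkle_utils.py and the contracts:

from eth_abi.packed import encode_packed  # eth-abi v4 API
from eth_utils import keccak

def leaf_hash(withdrawal_id: int, recipient: str, amount_wei: int) -> bytes:
    # amount must already be converted from DECIMAL(36,18) to the token's base units
    return keccak(encode_packed(
        ["uint256", "address", "uint256"],
        [withdrawal_id, recipient, amount_wei],
    ))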
Table: processed_events (Global Deduplication)
CREATE TABLE processed_events (
    id            SERIAL PRIMARY KEY,
    event_source  VARCHAR(255) NOT NULL,   -- "chain_id:tx_hash:log_index"
    event_type    VARCHAR(50) NOT NULL,    -- 'DEPOSIT', 'WITHDRAWAL', 'RESOLUTION'
    processed_at  TIMESTAMP DEFAULT NOW(),
    UNIQUE(event_source)                   -- ← CRITICAL: Only process once, ever
);
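A sketch of how this global dedup guard can be used, leaning only on the UNIQUE(event_source) constraint. The helper name and the processed_events_table Core Table object are assumptions:

from sqlalchemy.exc import IntegrityError

def mark_event_processed(db, chain_id, tx_hash, log_index, event_type):
    """Return True the first time an event is seen, False on any replay."""
    source = f"{chain_id}:{tx_hash}:{log_index}"
    try:
        db.execute(
            processed_events_table.insert().values(
                event_source=source,
                event_type=event_type,
            )
        )
        db.commit()
        return True
    except IntegrityError:
        db.rollback()
        return False  # already processed once; caller should skip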
Core Idempotency Patterns
Pattern 1: UNIQUE Constraint (Prevent Duplicate Ingestion)
# Event indexer
def ingest_deposit_events(block_range):
    """
    Idempotent: Safe to call multiple times on the same block range.
    Duplicate events will hit the UNIQUE constraint and be ignored.
    """
    for event in fetch_events_from_rpc(block_range):
        try:
            db.deposits.insert({
                'chain_id': event.chain_id,
                'tx_hash': event.tx_hash,
                'log_index': event.log_index,  # ← Makes (chain_id, tx_hash, log_index) unique
                'amount': event.amount,
                'status': 'DETECTED'
            })
            db.commit()  # Commit per event so one duplicate cannot poison the rest
        except IntegrityError:
            # Already ingested. Roll back the failed insert and skip.
            db.rollback()
            continue
Result: Even if RPC returns same event twice, database enforces single-insert.
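On Postgres, the same effect can be achieved without catching exceptions by letting the database discard duplicates. A sketch using SQLAlchemy's ON CONFLICT DO NOTHING; deposits_table is an assumed Core Table object, and MySQL would need INSERT IGNORE instead:

from sqlalchemy.dialects.postgresql import insert as pg_insert

def ingest_deposit_event(db, event):
    stmt = (
        pg_insert(deposits_table)
        .values(
            chain_id=event.chain_id,
            tx_hash=event.tx_hash,
            log_index=event.log_index,
            depositor=event.depositor,
            amount=event.amount,
            recipient=event.recipient,
            block_number=event.block_number,
            status='DETECTED',
        )
        .on_conflict_do_nothing(index_elements=['chain_id', 'tx_hash', 'log_index'])
    )
    db.execute(stmt)
    db.commit()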
Pattern 2: State-Based Idempotency (Check Before Acting)
# Minting deposits
def process_confirmed_deposits():
    """
    Idempotent: Safe to call multiple times. Only processes DETECTED deposits.
    """
    deposits = db.query(Deposits).filter(
        Deposits.status == 'DETECTED',
        Deposits.confirmed_at.isnot(None)
    )
    for dep in deposits:
        # Check status BEFORE acting
        if dep.status != 'DETECTED':
            continue  # Already processed, skip
        try:
            mint_receipt = bridge_service.mint(amount=dep.amount, to=dep.recipient)
            dep.status = 'MINTED'
            dep.minted_at = datetime.now()
            db.commit()
        except Exception as e:
            db.rollback()
            # Don't retry indefinitely; mark as failed
            dep.status = 'FAILED'
            dep.error_reason = str(e)
            db.commit()
Result: Rerun same function; it skips already-MINTED deposits, avoiding double-mint.
Pattern 3: Smart Contract Guards (Final Validation)
// CronosSettlementAnchor.sol
require(!withdrawalClaims[withdrawalId].claimed, "Already claimed");

// CronosSigilLockbox.sol
require(totalLocked >= amount, "Insufficient locked balance");
Result: Even if backend re-submits same withdrawal proof, contract rejects it.
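The backend can optionally short-circuit before paying gas by reading the claim state first. A sketch assuming the lockbox exposes a public withdrawalClaims(uint256) getter whose first field is the claimed flag; the on-chain require() remains the source of truth:

def already_claimed(lockbox_contract, withdrawal_id):
    """Read-only pre-check before submitting an execution tx."""
    claim = lockbox_contract.functions.withdrawalClaims(withdrawal_id).call()
    return bool(claim[0])  # assumes `claimed` is the first field of the struct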
Pattern 4: Merkle Proof Replay Prevention
# Batcher (compute merkle tree)
def create_batch(withdrawal_ids):
    """
    Deterministic: reads withdrawals but writes nothing.
    Always produces the SAME merkle root for the SAME input set.
    """
    withdrawals = [db.withdrawals[wid] for wid in withdrawal_ids]
    leaves = [
        # Solidity-style pseudocode; the real leaf encoding lives in merkle_utils.py
        keccak256(abi.encodePacked(w.id, w.recipient, w.amount))
        for w in withdrawals
    ]
    root = merkle_tree(leaves).root
    return root  # ← Deterministic, idempotent
Executor (replay prevention):
def execute_withdrawal_proof(withdrawal_id, recipient, amount, proof, batch_id):
    """
    Calls CronosSigilLockbox.withdraw(withdrawal_id, recipient, amount, proof, batch_id)
    Idempotency: Smart contract rejects if:
      1. withdrawalClaims[withdrawal_id].claimed == True (already executed)
      2. merkle_root doesn't match stored batch commitment
    """
    tx = contract.functions.withdraw(
        withdrawal_id,
        recipient,
        amount,
        proof,      # bytes32[] array
        batch_id
    ).transact()

    # Wait for confirmation
    receipt = w3.eth.wait_for_transaction_receipt(tx)
    withdrawal = db.withdrawals[withdrawal_id]
    if receipt.status == 1:  # Success
        withdrawal.status = 'EXECUTED'
        withdrawal.executed_at = datetime.now()
    else:
        # Transaction failed; don't retry same call
        withdrawal.status = 'FAILED'
        withdrawal.error_reason = f"Contract reverted: {receipt}"
    db.commit()
Confirmation Depth Rule
Even on Cronos testnet, require 5+ block confirmations before acting:
def find_confirmable_deposits():
    """
    Only process deposits with 5+ block confirmations.
    """
    current_block = w3.eth.block_number
    confirmable = db.query(Deposits).filter(
        Deposits.status == 'DETECTED',
        Deposits.confirmed_at == None,
        Deposits.block_number <= current_block - 5  # At least 5 blocks deep
    ).all()
    for dep in confirmable:
        dep.confirmed_at = datetime.now()
    db.commit()
    return len(confirmable)
Reasoning:
- Handles micro-reorgs (1-2 blocks)
- Survives validator crashes + recovery
- Applies the same rule to Phase 2.A (Cronos-only) and Phase 3 (Solana)
Batch Sequencing Rules
Batches MUST be sequential: 1, 2, 3, 4, ...
def create_next_batch(max_size=1000):
    """
    Create batch N+1 ONLY after batch N is ANCHORED.
    """
    latest_batch = db.query(Batches).order_by(Batches.id.desc()).first()
    if latest_batch:
        if latest_batch.status != 'ANCHORED':
            raise ValueError(f"Batch {latest_batch.id} not anchored yet. Cannot create next batch.")
        next_batch_id = latest_batch.id + 1
    else:
        next_batch_id = 1

    # Collect confirmed (not yet batched) withdrawals
    withdrawals = db.query(Withdrawals).filter(
        Withdrawals.status == 'CONFIRMED',
        Withdrawals.batch_id == None
    ).limit(max_size).all()
    if not withdrawals:
        return None

    # Compute merkle root
    root = compute_merkle_root(withdrawals)

    # Create batch
    batch = Batches(
        id=next_batch_id,
        merkle_root=root,
        total_amount=sum(w.amount for w in withdrawals),
        leaf_count=len(withdrawals),
        status='CREATED'
    )
    for w in withdrawals:
        w.batch_id = next_batch_id
        w.status = 'BATCHED'
    db.add(batch)
    db.commit()
    return batch
Why Sequential?
- Prevents gaps (Batch 1, Batch 3, Batch 5... is fragile)
- Orders withdrawals deterministically (same inputs → same root)
- Makes debugging easier (batch N always maps to one known, fixed set of withdrawals)
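The anchoring step that moves a batch from CREATED to ANCHORED (the precondition for creating batch N+1) has no snippet above. A sketch follows, using the same db/w3 globals as the other snippets; the contract call name commitBatchRoot is an assumption, so substitute the actual CronosSettlementAnchor ABI:

def anchor_batch(batch):
    """Submit the batch merkle root on-chain; idempotent on batch.status."""
    if batch.status != 'CREATED':
        return  # already ANCHORED or FINALIZED, nothing to do
    tx = anchor_contract.functions.commitBatchRoot(batch.id, batch.merkle_root).transact()
    receipt = w3.eth.wait_for_transaction_receipt(tx)
    if receipt.status != 1:
        raise RuntimeError(f"Anchoring batch {batch.id} reverted")
    batch.status = 'ANCHORED'
    batch.anchored_at = datetime.now()
    db.commit()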
Dry-Run Mode
# In relayer.py
class Relayer:
    def __init__(self, dry_run=False):
        self.dry_run = dry_run  # If True, compute but don't submit

    def execute_withdrawals(self):
        # Get pending withdrawals
        withdrawals = self.get_pending()
        for w in withdrawals:
            proof = self.merkle_utils.get_proof(w.batch_id, w.leaf_index)
            if self.dry_run:
                # Simulate without submitting
                is_valid = self.verify_proof_locally(proof, w)
                if is_valid:
                    print(f"[DRY RUN] Would execute withdrawal {w.id}")
                else:
                    raise ValueError(f"Proof invalid for {w.id}")
            else:
                # Submit for real
                tx_hash = self.submit_withdrawal_tx(w, proof)
                w.tx_hash = tx_hash
                db.commit()
Usage:
# Test without submitting
python relayer.py --dry-run

# Test with real submissions
python relayer.py --live
Logging Granularity
Every state change MUST log:
import logging

logger = logging.getLogger(__name__)

def process_deposit(dep):
    logger.info(
        "Processing deposit",
        extra={
            'deposit_id': dep.id,
            'tx_hash': dep.tx_hash,
            'block_number': dep.block_number,
            'amount': str(dep.amount),
            'recipient': dep.recipient
        }
    )
    try:
        receipt = bridge_service.mint(dep.amount, dep.recipient)
        logger.info(
            "Deposit minted successfully",
            extra={
                'deposit_id': dep.id,
                'mint_tx_hash': receipt.transactionHash,
                'gas_used': receipt.gasUsed,
                'block_number': receipt.blockNumber
            }
        )
        dep.status = 'MINTED'
        dep.minted_at = datetime.now()
    except Exception as e:
        logger.error(
            "Deposit mint failed",
            extra={
                'deposit_id': dep.id,
                'error': str(e),
                'error_type': type(e).__name__
            },
            exc_info=True
        )
        dep.status = 'FAILED'
        dep.error_reason = str(e)
    db.commit()
Logs should enable monitoring:
- ✅ Early detection of stuck batches
- ✅ Gas cost trending
- ✅ Error rate spikes
- ✅ Reorg detection (block reorgs cause tx_hash changes)
Cross-Platform Merkle Compatibility
Python and JavaScript MUST produce identical roots:
# Verification test (run before any merkle change)
cd verification/

# Python
python python-merkle-test.py

# JavaScript
node cross-platform-merkle-test.js

# Compare outputs
# Expected: Both output same merkle root hash
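A minimal sketch of the Python side, assuming 32-byte leaf hashes, sorted-pair keccak hashing (the OpenZeppelin MerkleProof convention), and duplicate-last-node padding on odd levels. Whatever convention merkle_utils.py actually uses, the JavaScript side must mirror it byte for byte:

from eth_utils import keccak

def merkle_root(leaves):
    """Compute a root from a list of 32-byte leaf hashes."""
    if not leaves:
        raise ValueError("empty leaf set")
    level = list(leaves)
    while len(level) > 1:
        if len(level) % 2 == 1:
            level.append(level[-1])  # pad odd levels by duplicating the last node (convention!)
        level = [
            keccak(b"".join(sorted((level[i], level[i + 1]))))  # sorted-pair hashing
            for i in range(0, len(level), 2)
        ]
    return level[0]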
Relayer State Machine (ASCII Diagram)
DEPOSIT FLOW:
┌─────────────────────────────────────────────────┐
│ RPC Event: DepositLocked(id, amount, recipient) │
└─────────────────────┬───────────────────────────┘
│
▼
Indexer: Store in deposits table
Status: DETECTED
│
Wait 5+ block confirmations
│
▼
Minter: Check deposit.status == 'DETECTED'
Call bridge_service.mint()
Update deposits.status = 'MINTED'
│
▼
✅ Success (or mark FAILED, don't retry)
WITHDRAWAL FLOW:
┌────────────────────────────────────────────────┐
│ RPC Event: WithdrawalBurned(id, amount, to) │
└─────────────────────┬────────────────────────────┘
│
▼
Indexer: Store in withdrawals table
Status: DETECTED
│
Wait 5+ block confirmations
│
▼
Batcher: Collect all CONFIRMED withdrawals
Compute merkle tree + root
Insert batch into batches table
Create batch_leaves entries
Update withdrawals.status = 'BATCHED'
│
▼
Anchor: Submit merkle root to CronosSettlementAnchor
Update batches.status = 'ANCHORED'
│
▼
Executor: For each BATCHED withdrawal
Compute merkle proof (from tree)
Call CronosSigilLockbox.withdraw(proof, ...)
Update withdrawals.status = 'EXECUTED'
│
▼
✅ Success (or mark FAILED, check contract error)
Monitoring & Alerts
Alert if:
- Batch creation delayed > 10 minutes (see the sketch below)
- Merkle root anchoring failed (tx reverted)
- Withdrawal proof execution failure > 5% of batch
- Reorg detected (block reorgs cause deposit/withdrawal tx_hash to change)
- Confirmation depth violations (deposits processed before 5 blocks)
- Database constraint violations (UNIQUE violations)
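A minimal stuck-batch check for the first alert, assuming the same ORM models as above; the 10-minute threshold and the alerting hook are placeholders:

from datetime import datetime, timedelta

def find_stuck_batches(db, max_age_minutes=10):
    """Return batches still CREATED (not yet anchored) past the alert threshold."""
    cutoff = datetime.now() - timedelta(minutes=max_age_minutes)
    return db.query(Batches).filter(
        Batches.status == 'CREATED',
        Batches.created_at < cutoff
    ).all()

def check_and_alert(db, notify):
    for batch in find_stuck_batches(db):
        notify(f"Batch {batch.id} created at {batch.created_at} is not yet anchored")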
Test Scenarios
def test_idempotent_deposit_ingestion():
    """Ingesting same deposit twice should not double-process."""
    deposit_event = make_event(tx_hash="0xABC", log_index=0)

    # Ingest once
    ingest_events([deposit_event])
    assert db.count(Deposits) == 1

    # Ingest again
    ingest_events([deposit_event])
    assert db.count(Deposits) == 1  # Still 1, not 2


def test_idempotent_minting():
    """Reminting same deposit should not double-mint."""
    dep = create_deposit(amount=100, status='DETECTED')

    # Mint once
    process_confirmed_deposits()
    assert db.deposits[dep.id].status == 'MINTED'

    # Mint again (should skip)
    process_confirmed_deposits()
    assert db.deposits[dep.id].status == 'MINTED'  # Still MINTED, not error


def test_merkle_proof_replay():
    """Executing same withdrawal proof twice should fail on second attempt."""
    withdrawal = create_withdrawal(id=42, amount=100)
    proof = compute_merkle_proof(withdrawal)

    # Execute once
    execute_withdrawal(withdrawal, proof)
    assert contract.withdrawalClaims[42].claimed == True

    # Execute again (should revert on-chain)
    try:
        execute_withdrawal(withdrawal, proof)
        assert False, "Should have reverted"
    except ContractRevertError:
        pass  # Expected


def test_batch_sequencing():
    """Batches must be sequential; cannot create batch N+2 before N+1."""
    batch_1 = create_batch(id=1, root="0xAAA")
    try:
        batch_3 = create_batch(id=3, root="0xCCC")
        assert False, "Should not allow gap"
    except ValueError as e:
        assert "Batch 1 not anchored" in str(e)
References
- PHASE_2B_RELAYER.md — Full Phase 2.B specification
- deposits/withdrawals/batches schema — Data model details
- merkle_utils.py — Cross-platform merkle tree implementation
- bridge_service.py — Deposit/withdrawal orchestration