refactor: consolidate CI and local dev orchestration (#108)

## Summary
- Extract shared bootstrap functions into `scripts/bootstrap-common.sh` (eliminates ~120 lines of duplicated forge/cast commands from e2e.yml)
- Create reusable `scripts/wait-for-service.sh` for health checks (replaces 60-line inline wait-for-stack)
- Merge dev and CI entrypoints into unified scripts branching on `CI` env var (delete `docker/ci-entrypoints/`)
- Replace 4 per-service CI Dockerfiles with parameterized `docker/Dockerfile.service-ci`
- Add `sync-tax-rates.mjs` to CI image builder stage
- Fix: CI now grants txnBot recenter access (was missing)
- Fix: txnBot funding parameterized (CI=10eth, local=1eth)
- Delete 5 obsolete migration docs and 4 DinD integration files

Net: 1540 lines removed

Closes #107

## Test plan
- [ ] E2E pipeline passes (bootstrap sources shared script, services use old images with commands override)
- [ ] build-ci-images pipeline builds all 4 services with unified Dockerfile
- [ ] Local dev stack boots via `./scripts/dev.sh start`

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: openhands <openhands@all-hands.dev>
Reviewed-on: https://codeberg.org/johba/harb/pulls/108
This commit is contained in:
johba 2026-02-03 12:07:28 +01:00
parent 4277f19b68
commit e5e1308e72
45 changed files with 882 additions and 2627 deletions

View file

@ -13,14 +13,18 @@ when:
path:
include:
- .woodpecker/build-ci-images.yml
- docker/Dockerfile.*-ci
- docker/ci-entrypoints/**
- docker/Dockerfile.service-ci
- docker/Dockerfile.node-ci
- containers/*-entrypoint.sh
- containers/entrypoint-common.sh
- kraiken-lib/**
- onchain/**
- services/ponder/**
- services/txnBot/**
- web-app/**
- landing/**
- scripts/sync-tax-rates.mjs
- scripts/bootstrap-common.sh
steps:
# Compile Solidity contracts to generate ABI files needed by Dockerfiles
@ -28,7 +32,7 @@ steps:
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
# Initialize git submodules (required for forge dependencies)
git submodule update --init --recursive
@ -56,54 +60,85 @@ steps:
# Login to registry
echo "$REGISTRY_PASSWORD" | docker login "$REGISTRY" -u "$REGISTRY_USER" --password-stdin
SHA="${CI_COMMIT_SHA:0:7}"
# Build and push node-ci (base image with Foundry pre-installed)
echo "=== Building node-ci ==="
docker build \
-f docker/Dockerfile.node-ci \
-t "$REGISTRY/harb/node-ci:${CI_COMMIT_SHA:0:7}" \
-t "$REGISTRY/harb/node-ci:$SHA" \
-t "$REGISTRY/harb/node-ci:latest" \
.
docker push "$REGISTRY/harb/node-ci:${CI_COMMIT_SHA:0:7}"
docker push "$REGISTRY/harb/node-ci:$SHA"
docker push "$REGISTRY/harb/node-ci:latest"
# Build and push ponder-ci
# Build and push ponder-ci (unified Dockerfile)
echo "=== Building ponder-ci ==="
docker build \
-f docker/Dockerfile.ponder-ci \
-t "$REGISTRY/harb/ponder-ci:${CI_COMMIT_SHA:0:7}" \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=services/ponder \
--build-arg SERVICE_PORT=42069 \
--build-arg ENTRYPOINT_SCRIPT=containers/ponder-entrypoint.sh \
--build-arg HEALTHCHECK_RETRIES=12 \
--build-arg HEALTHCHECK_START=20s \
--build-arg NEEDS_SYMLINKS=false \
-t "$REGISTRY/harb/ponder-ci:$SHA" \
-t "$REGISTRY/harb/ponder-ci:latest" \
.
docker push "$REGISTRY/harb/ponder-ci:${CI_COMMIT_SHA:0:7}"
docker push "$REGISTRY/harb/ponder-ci:$SHA"
docker push "$REGISTRY/harb/ponder-ci:latest"
# Build and push webapp-ci
# Build and push webapp-ci (unified Dockerfile)
echo "=== Building webapp-ci ==="
docker build \
-f docker/Dockerfile.webapp-ci \
-t "$REGISTRY/harb/webapp-ci:${CI_COMMIT_SHA:0:7}" \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=web-app \
--build-arg SERVICE_PORT=5173 \
--build-arg HEALTHCHECK_PATH=/app/ \
--build-arg HEALTHCHECK_RETRIES=84 \
--build-arg HEALTHCHECK_START=15s \
--build-arg ENTRYPOINT_SCRIPT=containers/webapp-entrypoint.sh \
--build-arg NODE_ENV=development \
--build-arg NEEDS_SYMLINKS=true \
-t "$REGISTRY/harb/webapp-ci:$SHA" \
-t "$REGISTRY/harb/webapp-ci:latest" \
.
docker push "$REGISTRY/harb/webapp-ci:${CI_COMMIT_SHA:0:7}"
docker push "$REGISTRY/harb/webapp-ci:$SHA"
docker push "$REGISTRY/harb/webapp-ci:latest"
# Build and push landing-ci
# Build and push landing-ci (unified Dockerfile)
echo "=== Building landing-ci ==="
docker build \
-f docker/Dockerfile.landing-ci \
-t "$REGISTRY/harb/landing-ci:${CI_COMMIT_SHA:0:7}" \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=landing \
--build-arg SERVICE_PORT=5174 \
--build-arg ENTRYPOINT_SCRIPT=containers/landing-ci-entrypoint.sh \
--build-arg NODE_ENV=development \
--build-arg HEALTHCHECK_RETRIES=6 \
--build-arg HEALTHCHECK_START=10s \
--build-arg NEEDS_SYMLINKS=false \
-t "$REGISTRY/harb/landing-ci:$SHA" \
-t "$REGISTRY/harb/landing-ci:latest" \
.
docker push "$REGISTRY/harb/landing-ci:${CI_COMMIT_SHA:0:7}"
docker push "$REGISTRY/harb/landing-ci:$SHA"
docker push "$REGISTRY/harb/landing-ci:latest"
# Build and push txnbot-ci
# Build and push txnbot-ci (unified Dockerfile)
echo "=== Building txnbot-ci ==="
docker build \
-f docker/Dockerfile.txnbot-ci \
-t "$REGISTRY/harb/txnbot-ci:${CI_COMMIT_SHA:0:7}" \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=services/txnBot \
--build-arg SERVICE_PORT=43069 \
--build-arg HEALTHCHECK_PATH=/status \
--build-arg HEALTHCHECK_RETRIES=4 \
--build-arg HEALTHCHECK_START=10s \
--build-arg ENTRYPOINT_SCRIPT=containers/txnbot-entrypoint.sh \
--build-arg NPM_INSTALL_CMD=install \
--build-arg NEEDS_SYMLINKS=false \
-t "$REGISTRY/harb/txnbot-ci:$SHA" \
-t "$REGISTRY/harb/txnbot-ci:latest" \
.
docker push "$REGISTRY/harb/txnbot-ci:${CI_COMMIT_SHA:0:7}"
docker push "$REGISTRY/harb/txnbot-ci:$SHA"
docker push "$REGISTRY/harb/txnbot-ci:latest"
echo "=== All CI images built and pushed ==="

View file

@ -10,7 +10,7 @@ steps:
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
@ -20,7 +20,7 @@ steps:
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
cd onchain
export PATH=/root/.foundry/bin:$PATH
@ -36,7 +36,7 @@ steps:
CI: "true"
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
npm config set fund false
npm config set audit false

View file

@ -10,7 +10,7 @@ steps:
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
@ -22,7 +22,7 @@ steps:
HARB_ENV: BASE_SEPOLIA_LOCAL_FORK
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
cd onchain
export PATH=/root/.foundry/bin:$PATH
@ -45,7 +45,7 @@ steps:
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
@ -59,7 +59,7 @@ steps:
from_secret: base_sepolia_rpc
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
cd onchain
export BASE_SEPOLIA_RPC="$BASE_SEPOLIA_RPC"

View file

@ -203,127 +203,18 @@ steps:
- install-deps
commands:
- |
set -eu
# Foundry is pre-installed in node-ci image
echo "=== Foundry version ==="
forge --version
cast --version
echo "=== Waiting for Anvil ==="
for i in $(seq 1 60); do
if cast chain-id --rpc-url http://anvil:8545 2>/dev/null; then
echo "Anvil is ready"
break
fi
echo "Waiting for Anvil... ($i/60)"
sleep 2
done
echo "=== Deploying contracts ==="
cd onchain
# Deploy contracts using forge script
forge script script/DeployLocal.sol:DeployLocal \
--rpc-url http://anvil:8545 \
--private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \
--broadcast
# Extract deployed addresses using Node.js (available in this image)
node -e "
const data = require('./broadcast/DeployLocal.sol/31337/run-latest.json');
const txns = data.transactions;
const kraiken = txns.find(t => t.contractName === 'Kraiken').contractAddress;
const stake = txns.find(t => t.contractName === 'Stake').contractAddress;
const lm = txns.find(t => t.contractName === 'LiquidityManager').contractAddress;
console.log('KRAIKEN=' + kraiken);
console.log('STAKE=' + stake);
console.log('LIQUIDITY_MANAGER=' + lm);
" > ../addresses.txt
. ../addresses.txt
# Get current block number as start block
START_BLOCK=$(cast block-number --rpc-url http://anvil:8545)
echo "=== Contract Deployment Complete ==="
echo "KRAIKEN: $KRAIKEN"
echo "STAKE: $STAKE"
echo "LIQUIDITY_MANAGER: $LIQUIDITY_MANAGER"
echo "START_BLOCK: $START_BLOCK"
# Build kraiken-lib BEFORE writing contracts.env
# (services wait for contracts.env, so kraiken-lib must be ready first)
echo "=== Building kraiken-lib (shared dependency) ==="
cd ../kraiken-lib
npm ci --ignore-scripts
./node_modules/.bin/tsc
cd ../onchain
# Write environment file for other services (absolute path for detached services)
{
echo "KRAIKEN=$KRAIKEN"
echo "STAKE=$STAKE"
echo "LIQUIDITY_MANAGER=$LIQUIDITY_MANAGER"
echo "START_BLOCK=$START_BLOCK"
echo "PONDER_RPC_URL_1=http://anvil:8545"
echo "DATABASE_URL=postgres://ponder:ponder_local@postgres:5432/ponder_local"
echo "RPC_URL=http://anvil:8545"
} > /woodpecker/src/contracts.env
# Write deployments-local.json for E2E tests
printf '{\n "contracts": {\n "Kraiken": "%s",\n "Stake": "%s",\n "LiquidityManager": "%s"\n }\n}\n' \
"$KRAIKEN" "$STAKE" "$LIQUIDITY_MANAGER" > deployments-local.json
echo "=== deployments-local.json written ==="
cat deployments-local.json
# Deployer and fee destination addresses
DEPLOYER_PK=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
DEPLOYER_ADDR=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266
FEE_DEST=0xf6a3eef9088A255c32b6aD2025f83E57291D9011
WETH=0x4200000000000000000000000000000000000006
SWAP_ROUTER=0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4
MAX_UINT=0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
echo "=== Funding LiquidityManager ==="
cast send --rpc-url http://anvil:8545 \
--private-key $DEPLOYER_PK \
"$LIQUIDITY_MANAGER" --value 0.1ether
echo "=== Granting recenter access ==="
cast rpc --rpc-url http://anvil:8545 anvil_impersonateAccount $FEE_DEST
cast send --rpc-url http://anvil:8545 --from $FEE_DEST --unlocked \
"$LIQUIDITY_MANAGER" "setRecenterAccess(address)" $DEPLOYER_ADDR
cast rpc --rpc-url http://anvil:8545 anvil_stopImpersonatingAccount $FEE_DEST
echo "=== Calling recenter() to seed liquidity ==="
cast send --rpc-url http://anvil:8545 --private-key $DEPLOYER_PK \
"$LIQUIDITY_MANAGER" "recenter()"
echo "=== Seeding application state (initial swap) ==="
# Wrap ETH to WETH
cast send --rpc-url http://anvil:8545 --private-key $DEPLOYER_PK \
$WETH "deposit()" --value 0.02ether
# Approve router
cast send --rpc-url http://anvil:8545 --private-key $DEPLOYER_PK \
$WETH "approve(address,uint256)" $SWAP_ROUTER $MAX_UINT
# Execute initial KRK swap
cast send --legacy --gas-limit 300000 --rpc-url http://anvil:8545 --private-key $DEPLOYER_PK \
$SWAP_ROUTER "exactInputSingle((address,address,uint24,address,uint256,uint256,uint160))" \
"($WETH,$KRAIKEN,10000,$DEPLOYER_ADDR,10000000000000000,0,0)"
# Fund txnBot wallet
TXNBOT_ADDR=0x70997970C51812dc3A010C7d01b50e0d17dc79C8
cast send --rpc-url http://anvil:8545 \
--private-key $DEPLOYER_PK \
--value 10ether \
$TXNBOT_ADDR
echo "TXNBOT_PRIVATE_KEY=0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" >> /woodpecker/src/contracts.env
echo "=== Bootstrap complete ==="
# Create a bootstrap wrapper that runs under bash
# (Woodpecker uses /bin/sh which lacks 'source' and bash-isms)
export ANVIL_RPC=http://anvil:8545
export CONTRACT_ENV=/woodpecker/src/contracts.env
export LOG_FILE=/dev/null
export ONCHAIN_DIR="$PWD/onchain"
export TXNBOT_FUND_VALUE=10ether
export TXNBOT_ADDRESS=0x70997970C51812dc3A010C7d01b50e0d17dc79C8
export TXNBOT_PRIVATE_KEY=0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d
exec bash scripts/ci-bootstrap.sh
# Step 2: Wait for stack to be healthy (services run in background)
# Max 7 minutes (420s per service check) - fail fast if services don't come up
- name: wait-for-stack
image: alpine:3.20
depends_on:
@ -331,62 +222,14 @@ steps:
commands:
- |
set -eu
apk add --no-cache curl
apk add --no-cache curl bash
echo "=== Waiting for stack to be healthy (max 7 min) ==="
MAX_ATTEMPTS=84 # 84 * 5s = 420s = 7 minutes
ATTEMPT=0
while [ $ATTEMPT -lt $MAX_ATTEMPTS ]; do
ATTEMPT=$((ATTEMPT + 1))
PONDER_OK=0
WEBAPP_OK=0
LANDING_OK=0
CADDY_OK=0
# Check each service with verbose output on failure
# Ponder dev mode serves at root (/) - matches Dockerfile healthcheck
if curl -sf --max-time 3 http://ponder:42069/ > /dev/null 2>&1; then
PONDER_OK=1
fi
# Webapp configured with --base /app/
if curl -sf --max-time 3 http://webapp:5173/app/ > /dev/null 2>&1; then
WEBAPP_OK=1
fi
if curl -sf --max-time 3 http://landing:5174/ > /dev/null 2>&1; then
LANDING_OK=1
fi
# Caddy check: verify proxy is working by checking webapp through Caddy
# Use /app/ since it's a reliable known-good route (landing fallback can return 403 if not ready)
if curl -sf --max-time 3 http://caddy:8081/app/ > /dev/null 2>&1; then
CADDY_OK=1
fi
echo "[$(date +%T)] ($ATTEMPT/$MAX_ATTEMPTS) ponder=$PONDER_OK webapp=$WEBAPP_OK landing=$LANDING_OK caddy=$CADDY_OK"
if [ "$PONDER_OK" = "1" ] && [ "$WEBAPP_OK" = "1" ] && [ "$LANDING_OK" = "1" ] && [ "$CADDY_OK" = "1" ]; then
echo "All services healthy!"
bash scripts/wait-for-service.sh http://ponder:42069/ 420 ponder
bash scripts/wait-for-service.sh http://webapp:5173/app/ 420 webapp
bash scripts/wait-for-service.sh http://landing:5174/ 420 landing
bash scripts/wait-for-service.sh http://caddy:8081/app/ 420 caddy
echo "=== Stack is healthy ==="
exit 0
fi
sleep 5
done
echo "ERROR: Services did not become healthy within 7 minutes"
echo "Final status: ponder=$PONDER_OK webapp=$WEBAPP_OK landing=$LANDING_OK caddy=$CADDY_OK"
# Show more diagnostic info
echo "=== Diagnostic: checking individual endpoints ==="
echo "--- Ponder root (/) ---"
curl -v --max-time 5 http://ponder:42069/ 2>&1 | head -20 || true
echo "--- Webapp /app/ ---"
curl -v --max-time 5 http://webapp:5173/app/ 2>&1 | head -20 || true
echo "--- Landing / ---"
curl -v --max-time 5 http://landing:5174/ 2>&1 | head -20 || true
echo "--- Caddy / (landing via proxy) ---"
curl -v --max-time 5 http://caddy:8081/ 2>&1 | head -20 || true
exit 1
# Step 3: Run E2E tests
- name: run-e2e-tests
@ -428,12 +271,12 @@ steps:
if [ -d playwright-report ]; then
tar -czf artifacts/playwright-report.tgz playwright-report
echo "Playwright report archived"
echo "Playwright report archived"
fi
if [ -d test-results ]; then
tar -czf artifacts/test-results.tgz test-results
echo "Test results archived"
echo "Test results archived"
fi
ls -lh artifacts/ 2>/dev/null || echo "No artifacts"

View file

@ -10,7 +10,7 @@ steps:
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
@ -20,7 +20,7 @@ steps:
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
if ! command -v bc >/dev/null 2>&1; then
apt-get update

View file

@ -12,7 +12,7 @@ steps:
event: tag
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
git submodule update --init --recursive
corepack enable
@ -75,7 +75,7 @@ steps:
event: tag
commands:
- |
bash -lc '
bash -c '
set -euo pipefail
npm config set fund false
npm config set audit false
@ -125,7 +125,7 @@ steps:
from_secret: registry_password
commands:
- |
bash -lc '
bash -c '
set -eo pipefail
if [ -z "${CI_COMMIT_TAG:-}" ]; then
echo "CI_COMMIT_TAG not set" >&2

View file

@ -11,13 +11,33 @@
3. **Compete** - Snatch undervalued positions to optimise returns.
## Operating the Stack
- Start everything with `nohup ./scripts/dev.sh start &` and stop via `./scripts/dev.sh stop`. Do not launch services individually.
- **Restart modes** for faster iteration:
- `./scripts/dev.sh restart --light` - Fast restart (~10-20s): only webapp + txnbot, preserves Anvil/Ponder state. Use for frontend changes.
- `./scripts/dev.sh restart --full` - Full restart (~3-4min): redeploys contracts, fresh state. Use for contract changes.
- Supported environments: `BASE_SEPOLIA_LOCAL_FORK` (default Anvil fork), `BASE_SEPOLIA`, and `BASE`. Match contract addresses and RPCs accordingly.
- The stack uses Docker containers orchestrated via docker-compose. The script boots Anvil, deploys contracts, seeds liquidity, starts Ponder, launches the landing site, and runs the txnBot. Wait for logs to settle before manual testing.
- **Prerequisites**: Docker Engine (Linux) or Colima (Mac). See installation instructions below.
### Quick Start
```bash
nohup ./scripts/dev.sh start & # start (takes ~3-6 min first time)
tail -f nohup.out # watch progress
./scripts/dev.sh health # verify all services healthy
./scripts/dev.sh stop # stop and clean up
```
Do not launch services individually — `dev.sh` enforces phased startup with health gates.
### Restart Modes
- `./scripts/dev.sh restart --light` — Fast (~10-20s): only webapp + txnbot, preserves Anvil/Ponder state. Use for frontend changes.
- `./scripts/dev.sh restart --full` — Full (~3-6min): redeploys contracts, fresh state. Use for contract changes.
### Common Pitfalls
- **Docker disk full**: `dev.sh start` refuses to run if Docker disk usage exceeds 20GB. Fix: `./scripts/dev.sh stop` (auto-prunes) or `docker system prune -af --volumes`.
- **Stale Ponder state**: If Ponder fails with schema errors after contract changes, delete its state: `rm -rf services/ponder/.ponder/` then `./scripts/dev.sh restart --full`.
- **kraiken-lib out of date**: If services fail with import errors or missing exports, rebuild: `./scripts/build-kraiken-lib.sh`. The dev script does this automatically on `start`, but manual rebuilds are needed if you change kraiken-lib while the stack is already running.
- **Container not found errors**: `dev.sh` expects Docker Compose v2 container names (`harb-anvil-1`, hyphens not underscores). Verify with `docker compose version`.
- **Port conflicts**: The stack uses ports 8545 (Anvil), 5173 (webapp), 5174 (landing), 42069 (Ponder), 43069 (txnBot), 8081 (Caddy). Check with `lsof -i :<port>` if startup fails.
- **npm ci failures in containers**: Named Docker volumes cache `node_modules/`. If dependencies change and installs fail, remove the volume: `docker volume rm harb_webapp_node_modules` (or similar), then restart.
### Environments
Supported: `BASE_SEPOLIA_LOCAL_FORK` (default Anvil fork), `BASE_SEPOLIA`, and `BASE`. Match contract addresses and RPCs accordingly.
### Prerequisites
Docker Engine (Linux) or Colima (Mac). See `docs/docker.md` for installation.
## Component Guides
- `onchain/` - Solidity + Foundry contracts, deploy scripts, and fuzzing helpers ([details](onchain/AGENTS.md)).
@ -30,13 +50,13 @@
- Contracts: run `forge build`, `forge test`, and `forge snapshot` inside `onchain/`.
- Fuzzing: scripts under `onchain/analysis/` (e.g., `./analysis/run-fuzzing.sh [optimizer] debugCSV`) generate replayable scenarios.
- Integration: after the stack boots, inspect Anvil logs, hit `http://localhost:8081/api/graphql` for Ponder, and poll `http://localhost:8081/api/txn/status` for txnBot health.
- **E2E Tests**: Playwright-based full-stack tests in `tests/e2e/` verify complete user journeys (mint ETH → swap KRK → stake). Run with `npm run test:e2e` from repo root. Tests use mocked wallet provider with Anvil accounts and automatically start/stop the stack. See `INTEGRATION_TEST_STATUS.md` and `SWAP_VERIFICATION.md` for details.
- **E2E Tests**: Playwright-based full-stack tests in `tests/e2e/` verify complete user journeys (mint ETH → swap KRK → stake). Run with `npm run test:e2e` from repo root. Tests use mocked wallet provider with Anvil accounts. In CI, the Woodpecker e2e pipeline runs these against pre-built service images.
## Version Validation System
- **Contract VERSION**: `Kraiken.sol` exposes a `VERSION` constant (currently v1) that must be incremented for breaking changes to TAX_RATES, events, or core data structures.
- **Ponder Validation**: On startup, Ponder reads the contract VERSION and validates against `COMPATIBLE_CONTRACT_VERSIONS` in `kraiken-lib/src/version.ts`. Fails hard (exit 1) on mismatch to prevent indexing wrong data.
- **Frontend Check**: Web-app validates `KRAIKEN_LIB_VERSION` at runtime (currently placeholder; future: query Ponder GraphQL for full 3-way validation).
- **CI Enforcement**: GitHub workflow validates that contract VERSION is in `COMPATIBLE_CONTRACT_VERSIONS` before merging PRs.
- **CI Enforcement**: Woodpecker `release.yml` pipeline validates that contract VERSION matches `COMPATIBLE_CONTRACT_VERSIONS` before release.
- See `VERSION_VALIDATION.md` for complete architecture, workflows, and troubleshooting.
## Docker Installation & Setup
@ -48,7 +68,8 @@
docker ps # verify installation
```
- **Container Orchestration**: `docker-compose.yml` has NO `depends_on` declarations. All service ordering is handled in `scripts/dev.sh` via phased startup with explicit health checks.
- **Startup Phases**: (1) Create all containers, (2) Start anvil+postgres and wait for healthy, (3) Start bootstrap and wait for completion, (4) Start ponder and wait for healthy, (5) Start webapp/landing/txn-bot, (6) Start caddy.
- **Startup Phases**: (1) Start anvil+postgres and wait for healthy, (2) Start bootstrap and wait for exit, (3) Start ponder and wait for healthy, (4) Start webapp/landing/txn-bot, (5) Start caddy, (6) Smoke test via `scripts/wait-for-service.sh`.
- **Shared Bootstrap**: Contract deployment, seeding, and funding logic lives in `scripts/bootstrap-common.sh`, sourced by both `containers/bootstrap.sh` (local dev) and `scripts/ci-bootstrap.sh` (CI). Constants (FEE_DEST, WETH, SWAP_ROUTER, default keys) are defined once there.
- **Logging Configuration**: All services have log rotation configured (max 10MB per file, 3 files max = 30MB per container) to prevent disk bloat. Logs are automatically rotated by Docker.
- **Disk Management** (Portable, No Per-Machine Setup Required):
- **20GB Hard Limit**: The stack enforces a 20GB total Docker disk usage limit (images + containers + volumes + build cache).
@ -98,8 +119,8 @@
- **Logs**: `journalctl -u woodpecker-server -f` (NOT `docker logs`)
### Pipeline Configs
- `.woodpecker/build-ci-images.yml` — Builds Docker CI images. Triggers on **push** to `master` or `feature/ci` when files in `docker/`, `.woodpecker/`, `kraiken-lib/`, `onchain/out/`, or `web-app/` change.
- `.woodpecker/e2e.yml` — Runs Playwright E2E tests. Triggers on **pull_request** to `master`.
- `.woodpecker/build-ci-images.yml` — Builds Docker CI images using unified `docker/Dockerfile.service-ci`. Triggers on **push** to `master` or `feature/ci` when files in `docker/`, `.woodpecker/`, `containers/`, `kraiken-lib/`, `onchain/`, `services/`, `web-app/`, or `landing/` change.
- `.woodpecker/e2e.yml` — Runs Playwright E2E tests. Bootstrap step sources `scripts/bootstrap-common.sh` for shared deploy/seed logic. Health checks use `scripts/wait-for-service.sh`. Triggers on **pull_request** to `master`.
- Pipeline numbering: even = build-ci-images (push events), odd = E2E (pull_request events). This is not guaranteed but was the observed pattern.
### Monitoring Pipelines via DB
@ -133,17 +154,47 @@ PGPASSWORD='<db_password>' psql -h 127.0.0.1 -U woodpecker -d woodpecker -c \
- **API auth limitation**: The server caches user token hashes in memory. Inserting a token directly into the DB does not work without restarting the server (`sudo systemctl restart woodpecker-server`).
### CI Docker Images
- `docker/Dockerfile.webapp-ci` — Webapp CI image with Vite dev server.
- **Symlinks fix** (lines 57-59): Creates `/web-app`, `/kraiken-lib`, `/onchain` symlinks to work around Vite's `removeBase()` stripping `/app/` prefix from filesystem paths.
- `docker/Dockerfile.service-ci` — Unified parameterized Dockerfile for all service CI images (ponder, webapp, landing, txnBot). Uses `--build-arg` for service-specific configuration (SERVICE_DIR, SERVICE_PORT, ENTRYPOINT_SCRIPT, NEEDS_SYMLINKS, etc.).
- **sync-tax-rates**: Builder stage runs `scripts/sync-tax-rates.mjs` to sync tax rates from `Stake.sol` into kraiken-lib before TypeScript compilation.
- **Symlinks fix** (webapp only, `NEEDS_SYMLINKS=true`): Creates `/web-app`, `/kraiken-lib`, `/onchain` symlinks to work around Vite's `removeBase()` stripping `/app/` prefix from filesystem paths.
- **CI env detection** (`CI=true`): Disables Vue DevTools plugin in `vite.config.ts` to prevent 500 errors caused by path resolution issues with `/app/` base path.
- **HEALTHCHECK**: `--retries=84 --interval=5s` = 420s (7 min) total wait, aligned with `wait-for-stack` step timeout.
- **HEALTHCHECK**: Configurable via build args; webapp uses `--retries=84 --interval=5s` = 420s (7 min), aligned with `wait-for-stack` step timeout.
- **Shared entrypoints**: Each service uses a unified entrypoint script (`containers/<service>-entrypoint.sh`) that branches on `CI=true` env var for CI vs local dev paths. Common helpers in `containers/entrypoint-common.sh`.
- **Shared bootstrap**: `scripts/bootstrap-common.sh` contains shared contract deployment, seeding, and funding functions used by both `containers/bootstrap.sh` (local dev) and `.woodpecker/e2e.yml` (CI).
- CI images are tagged with git SHA and `latest`, pushed to a local registry.
### CI Agent & Registry Auth
- **Agent**: Runs as user `ci` (uid 1001) on `harb-staging`, same host as the dev environment. Binary at `/usr/local/bin/woodpecker-agent`.
- **Registry credentials**: The `ci` user must have Docker auth configured at `/home/ci/.docker/config.json` to pull private images from `registry.niovi.voyage`. If images fail to pull with "no basic auth credentials", fix with:
```bash
sudo mkdir -p /home/ci/.docker
sudo cp /home/debian/.docker/config.json /home/ci/.docker/config.json
sudo chown -R ci:ci /home/ci/.docker
sudo chmod 600 /home/ci/.docker/config.json
```
- **Shared Docker daemon**: The `ci` and `debian` users share the same Docker daemon. Running `docker system prune` as `debian` removes images cached for CI pipelines. If CI image pulls fail after a prune, either fix registry auth (above) or pre-pull images as `debian`: `docker pull registry.niovi.voyage/harb/ponder-ci:latest` etc.
### CI Debugging Tips
- If pipelines aren't being created after a push, check Codeberg webhook delivery logs first.
- The Woodpecker server needs `sudo` to restart. Without it, you cannot: refresh API tokens, clear cached state, or recover from webhook auth issues.
- E2E pipeline failures often come from `wait-for-stack` timing out. Check the webapp HEALTHCHECK alignment and Ponder indexing time.
- The `web-app/vite.config.ts` `allowedHosts` array must include container hostnames (`webapp`, `caddy`) for health checks to succeed inside Docker networks.
- **Never use `bash -lc`** in Woodpecker pipeline commands — login shell resets PATH via `/etc/profile`, losing Foundry and other tools set by Docker ENV. Use `bash -c` instead.
## Codeberg API Access
- **Auth**: Codeberg API tokens are stored in `~/.netrc` (standard `curl --netrc` format, `chmod 600`):
```
machine codeberg.org
login johba
password <api-token>
```
The `password` field holds the API token — this is standard `.netrc` convention, not an actual password.
- **Generate tokens** at `https://codeberg.org/user/settings/applications`.
- **Usage**: Pass `--netrc` to curl for authenticated Codeberg API calls:
```bash
curl --netrc -s https://codeberg.org/api/v1/repos/johba/harb/issues | jq '.[0].title'
```
- **Note**: The repo uses SSH for git push/pull (`ssh://git@codeberg.org`), so `.netrc` is only used for REST API interactions (issues, PRs, releases).
## References
- Deployment history: `onchain/deployments-local.json`, `onchain/broadcast/`.

View file

@ -1,264 +0,0 @@
# Changelog: Version Validation System & Tax Rate Index Refactoring
## Date: 2025-10-07
## Summary
This release implements a comprehensive version validation system to ensure contract-indexer-frontend compatibility and completes the tax rate index refactoring to eliminate fragile decimal lookups.
## Major Features
### 1. Version Validation System
**Contract Changes:**
- `onchain/src/Kraiken.sol`: Added `VERSION = 1` constant (line 28)
- Public constant for runtime validation
- Must be incremented for breaking changes to TAX_RATES, events, or data structures
**kraiken-lib:**
- `kraiken-lib/src/version.ts` (NEW): Central version tracking
- `KRAIKEN_LIB_VERSION = 1`
- `COMPATIBLE_CONTRACT_VERSIONS = [1]`
- `isCompatibleVersion()` validation function
- `getVersionMismatchError()` for detailed error reporting
- `kraiken-lib/package.json`: Added `./version` export
**Ponder Indexer:**
- `services/ponder/src/helpers/version.ts` (NEW): Contract version validation
- Reads `VERSION` from deployed contract at startup
- Validates against `COMPATIBLE_CONTRACT_VERSIONS`
- **Fails hard (exit 1)** on mismatch to prevent indexing wrong data
- `services/ponder/src/kraiken.ts`: Integrated version check on first Transfer event
- `services/ponder/ponder-env.d.ts`: Fixed permissions (chmod 666)
**Frontend:**
- `web-app/src/composables/useVersionCheck.ts` (NEW): Version validation composable
- Validates `KRAIKEN_LIB_VERSION` loads correctly
- Placeholder for future GraphQL-based 3-way validation
- Warns (doesn't fail) on mismatch
**CI/CD:**
- `.github/workflows/validate-version.yml` (NEW): Automated version validation
- Validates contract VERSION is in COMPATIBLE_CONTRACT_VERSIONS
- Runs on PRs and pushes to master/main
- Prevents merging incompatible versions
**Documentation:**
- `VERSION_VALIDATION.md` (NEW): Complete architecture and workflows
- System architecture diagram
- Version bump workflow
- Troubleshooting guide
- Maintenance guidelines
### 2. Container Orchestration Fix
**Problem:** Container dependency graph validator can fail with "container not found in input list" errors when containers have `depends_on` metadata.
**Solution:**
- `docker-compose.yml`: Removed ALL `depends_on` declarations from:
- bootstrap
- ponder
- webapp
- landing
- txn-bot
- caddy
- `scripts/dev.sh`: Implemented phased startup with explicit health checks:
1. Create all containers (`docker-compose up --no-start`)
2. Start anvil & postgres, wait for healthy
3. Start bootstrap, wait for completion
4. Start ponder, wait for healthy
5. Start webapp/landing/txn-bot
6. Start caddy
**Result:** Stack starts reliably without dependency graph errors.
### 3. Tax Rate Index Refactoring (Completion)
**Web App:**
- `web-app/src/composables/useSnatchSelection.ts`:
- Replaced `position.taxRate >= maxTaxRateDecimal` with `posIndex >= selectedTaxRateIndex`
- Fixed test data to match index-based logic
- `web-app/src/composables/usePositions.ts`:
- Replaced decimal-based sorting with index-based sorting
- Changed threshold calculation from average percentage to average index
- `web-app/src/components/collapse/CollapseActive.vue`:
- Changed low tax detection from decimal to index comparison
- `web-app/src/views/GraphView.vue`: **DELETED** (dead code, 63 lines)
**Ponder:**
- `services/ponder/ponder.schema.ts`:
- **CRITICAL FIX**: Import `TAX_RATE_OPTIONS` from kraiken-lib instead of hardcoded array
- Added `taxRateIndex` column to positions table
- Added index on `taxRateIndex` column
- `services/ponder/src/stake.ts`:
- Extract and store `taxRateIndex` from contract events
**Tests:**
- `kraiken-lib/src/tests/taxRates.test.ts`: Fixed Jest ES module compatibility
- `kraiken-lib/jest.config.js` → `kraiken-lib/jest.config.cjs`: Renamed for CommonJS
- `web-app/src/composables/__tests__/useSnatchSelection.spec.ts`: Fixed test data inconsistencies
## File Changes
### Added Files (7)
1. `.github/workflows/validate-version.yml` - CI/CD validation
2. `VERSION_VALIDATION.md` - Documentation
3. `kraiken-lib/src/version.ts` - Version tracking
4. `kraiken-lib/jest.config.cjs` - Jest config
5. `services/ponder/src/helpers/version.ts` - Ponder validation
6. `web-app/src/composables/useVersionCheck.ts` - Frontend validation
7. `scripts/sync-tax-rates.mjs` - Tax rate sync script
### Deleted Files (2)
1. `web-app/src/views/GraphView.vue` - Dead code
2. `kraiken-lib/jest.config.js` - Replaced with .cjs
### Modified Files (29)
1. `.gitignore` - Added test artifacts, logs, ponder state
2. `CLAUDE.md` - Added Version Validation and Podman Orchestration sections
3. `kraiken-lib/AGENTS.md` - Added version.ts to Key Modules
4. `kraiken-lib/package.json` - Added ./version export
5. `kraiken-lib/src/index.ts` - Export version validation functions
6. `kraiken-lib/src/taxRates.ts` - Generated tax rates with checksums
7. `kraiken-lib/src/tests/taxRates.test.ts` - Fixed Jest compatibility
8. `onchain/src/Kraiken.sol` - Added VERSION constant
9. `docker-compose.yml` - Removed all depends_on declarations
10. `scripts/build-kraiken-lib.sh` - Updated build process
11. `scripts/dev.sh` - Implemented phased startup
12. `services/ponder/AGENTS.md` - Updated documentation
13. `services/ponder/ponder-env.d.ts` - Fixed permissions
14. `services/ponder/ponder.schema.ts` - Import from kraiken-lib, add taxRateIndex
15. `services/ponder/src/kraiken.ts` - Added version validation
16. `services/ponder/src/stake.ts` - Store taxRateIndex
17. `tests/e2e/01-acquire-and-stake.spec.ts` - Test updates
18. `web-app/README.md` - Documentation updates
19. `web-app/env.d.ts` - Type updates
20. `web-app/src/components/StakeHolder.vue` - Index-based logic
21. `web-app/src/components/collapse/CollapseActive.vue` - Index comparison
22. `web-app/src/components/fcomponents/FSelect.vue` - Index handling
23. `web-app/src/composables/__tests__/useSnatchSelection.spec.ts` - Fixed tests
24. `web-app/src/composables/useAdjustTaxRates.ts` - Index-based adjustments
25. `web-app/src/composables/usePositions.ts` - Index-based sorting and threshold
26. `web-app/src/composables/useSnatchSelection.ts` - Index-based filtering
27. `web-app/src/composables/useStake.ts` - Index handling
28-29. Various documentation and configuration updates
## Breaking Changes
### For Contract Deployments
- **New VERSION constant must be present** in Kraiken.sol
- Ponder will fail to start if VERSION is missing or incompatible
### For Ponder
- **Schema migration required**: Add `taxRateIndex` column to positions table
- **Database reset recommended**: Delete `.ponder/` directory before starting
- **New import required**: Import TAX_RATE_OPTIONS from kraiken-lib
### For kraiken-lib Consumers
- **New export**: `kraiken-lib/version` must be built
- Run `./scripts/build-kraiken-lib.sh` to regenerate dist/
## Migration Guide
### Updating to This Version
1. **Stop the stack:**
```bash
./scripts/dev.sh stop
```
2. **Clean Ponder state:**
```bash
rm -rf services/ponder/.ponder/
```
3. **Rebuild kraiken-lib:**
```bash
./scripts/build-kraiken-lib.sh
```
4. **Rebuild contracts (if needed):**
```bash
cd onchain && forge build
```
5. **Start the stack:**
```bash
./scripts/dev.sh start
```
6. **Verify version validation:**
```bash
docker logs harb_ponder_1 | grep "version validated"
```
Should output: `✓ Contract version validated: v1 (kraiken-lib v1)`
### Future Version Bumps
When making breaking changes to TAX_RATES, events, or data structures:
1. **Increment VERSION in Kraiken.sol:**
```solidity
uint256 public constant VERSION = 2;
```
2. **Update COMPATIBLE_CONTRACT_VERSIONS in kraiken-lib/src/version.ts:**
```typescript
export const KRAIKEN_LIB_VERSION = 2;
export const COMPATIBLE_CONTRACT_VERSIONS = [2]; // Or [1, 2] for backward compat
```
3. **Rebuild and redeploy:**
```bash
./scripts/build-kraiken-lib.sh
rm -rf services/ponder/.ponder/
cd onchain && forge script script/Deploy.s.sol
```
## Validation
### Unit Tests
- ✅ kraiken-lib tests pass
- ✅ web-app tests pass
- ✅ Ponder codegen succeeds
- ✅ onchain tests pass
### Integration Tests
- ✅ Stack starts without dependency errors
- ✅ Ponder validates contract version successfully
- ✅ Ponder indexes events with taxRateIndex
- ✅ GraphQL endpoint responds
- ✅ Version validation logs appear in Ponder output
### Manual Verification
```bash
# Check Ponder logs for version validation
docker logs harb_ponder_1 | grep "version validated"
# Output: ✓ Contract version validated: v1 (kraiken-lib v1)
# Check contract VERSION
cast call $KRAIKEN_ADDRESS "VERSION()" --rpc-url http://localhost:8545
# Output: 1
# Query positions with taxRateIndex
curl -X POST http://localhost:42069/graphql \
-d '{"query":"{ positions { id taxRateIndex taxRate } }"}'
```
## Known Issues
None. All blocking issues resolved.
## Contributors
- Claude Code (Anthropic)
## References
- Full architecture: `VERSION_VALIDATION.md`
- Podman orchestration: `CLAUDE.md` § Podman Orchestration
- Tax rate system: `kraiken-lib/src/taxRates.ts`

View file

@ -1,249 +0,0 @@
# CI Migration: Composite Integration Service (Option A)
## Overview
The E2E pipeline has been refactored to use a **composite integration service** that bundles the entire Harb stack into a single Docker image. This eliminates Docker-in-Docker complexity and significantly speeds up CI runs.
## Architecture
### Before (Docker-in-Docker)
```
Woodpecker Pipeline
├─ Service: docker:dind (privileged)
└─ Step: run-e2e
├─ Install docker CLI + docker-compose
├─ Run ./scripts/dev.sh start (nested containers)
│ ├─ anvil
│ ├─ postgres
│ ├─ bootstrap
│ ├─ ponder
│ ├─ webapp
│ ├─ landing
│ ├─ txn-bot
│ └─ caddy
└─ Run Playwright tests
```
**Issues**:
- ~3-5 minutes stack startup overhead per run
- Complex nested container management
- Docker-in-Docker reliability issues
- Dependency reinstallation in every step
### After (Composite Service)
```
Woodpecker Pipeline
├─ Service: harb/integration (contains full stack)
│ └─ Manages internal docker-compose lifecycle
├─ Step: wait-for-stack (30-60s)
└─ Step: run-e2e-tests (Playwright only)
```
**Benefits**:
- ✅ **3-5 minutes faster** - Stack starts in parallel with pipeline setup
- ✅ **Simpler** - No DinD complexity, standard service pattern
- ✅ **Reliable** - Single health check, clearer failure modes
- ✅ **Reusable** - Same image for local testing and CI
## Components
### 1. Integration Image (`docker/Dockerfile.integration`)
- Base: `docker:27-dind`
- Bundles: Full project + docker-compose
- Entrypoint: Starts dockerd + Harb stack automatically
- Healthcheck: Validates GraphQL endpoint is responsive
### 2. CI Compose File (`docker-compose.ci.yml`)
- Simplified interface for local testing
- Exposes port 8081 for stack access
- Persists Docker state in named volume
### 3. New E2E Pipeline (`.woodpecker/e2e-new.yml`)
- Service: `harb/integration` (stack)
- Step 1: Wait for stack health
- Step 2: Run Playwright tests
- Step 3: Collect artifacts
### 4. Build Script (`scripts/build-integration-image.sh`)
- Builds integration image
- Pushes to registry
- Includes local testing instructions
## Migration Steps
### 1. Build the Integration Image
```bash
# Build locally
./scripts/build-integration-image.sh
# Or with custom registry
REGISTRY=localhost:5000 ./scripts/build-integration-image.sh
```
### 2. Push to Registry
```bash
# Login to registry (if using sovraigns.network registry)
docker login registry.sovraigns.network -u ciuser
# Push
docker push registry.sovraigns.network/harb/integration:latest
```
### 3. Activate New Pipeline
```bash
# Backup old E2E pipeline
mv .woodpecker/e2e.yml .woodpecker/e2e-old.yml
# Activate new pipeline
mv .woodpecker/e2e-new.yml .woodpecker/e2e.yml
# Commit changes
git add .woodpecker/e2e.yml docker/ scripts/build-integration-image.sh
git commit -m "ci: migrate E2E to composite integration service"
```
### 4. Update CI Image Build Workflow
Add to release pipeline or create dedicated workflow:
```yaml
# .woodpecker/build-ci-images.yml
kind: pipeline
type: docker
name: build-integration-image
when:
event:
- push
- tag
branch:
- main
- master
steps:
- name: build-and-push
image: docker:27-dind
privileged: true
environment:
DOCKER_HOST: tcp://docker:2375
REGISTRY_USER:
from_secret: registry_user
REGISTRY_PASSWORD:
from_secret: registry_password
commands:
- docker login registry.sovraigns.network -u $REGISTRY_USER -p $REGISTRY_PASSWORD
- ./scripts/build-integration-image.sh
- docker push registry.sovraigns.network/harb/integration:latest
```
## Local Testing
### Test Integration Image Directly
```bash
# Start the stack container
docker run --rm --privileged -p 8081:8081 \
registry.sovraigns.network/harb/integration:latest
# Wait for health (in another terminal)
curl http://localhost:8081/api/graphql
# Run E2E tests against it
npm run test:e2e
```
### Test via docker-compose.ci.yml
```bash
# Start stack
docker-compose -f docker-compose.ci.yml up -d
# Wait for healthy
docker-compose -f docker-compose.ci.yml ps
# Run tests
npm run test:e2e
# Cleanup
docker-compose -f docker-compose.ci.yml down -v
```
## Rollback Plan
If issues arise, revert to old pipeline:
```bash
# Restore old pipeline
mv .woodpecker/e2e-old.yml .woodpecker/e2e.yml
# Commit
git add .woodpecker/e2e.yml
git commit -m "ci: rollback to DinD E2E pipeline"
git push
```
## Performance Comparison
| Metric | Before (DinD) | After (Composite) | Improvement |
|--------|---------------|-------------------|-------------|
| Stack startup | ~180-240s | ~60-90s | **~2-3 min faster** |
| Total E2E time | ~8-10 min | ~5-6 min | **~40% faster** |
| Complexity | High (nested) | Low (standard) | Simpler |
| Reliability | Medium | High | More stable |
## Troubleshooting
### Image build fails
```bash
# Check kraiken-lib builds successfully
./scripts/build-kraiken-lib.sh
# Build with verbose output
docker build -f docker/Dockerfile.integration --progress=plain .
```
### Stack doesn't start in CI
```bash
# Check service logs in Woodpecker
# Services run detached, logs available via Woodpecker UI
# Test locally first
docker run --rm --privileged -p 8081:8081 \
registry.sovraigns.network/harb/integration:latest
```
### Healthcheck times out
- Default timeout: 120s start period + 30 retries × 5s = ~270s max
- First run is slower (pulling images, building)
- Subsequent runs use cached layers (~60-90s)
## Future Improvements
1. **Multi-stage build** - Separate build and runtime images
2. **Layer caching** - Optimize Dockerfile for faster rebuilds
3. **Parallel services** - Start independent services concurrently
4. **Resource limits** - Add memory/CPU constraints for CI
5. **Image variants** - Separate images for different test suites
## Podman to Docker Migration
As part of this work, the Woodpecker agent was migrated from Podman to Docker:
**Changes made**:
- Updated `/etc/woodpecker/agent.env`:
- `WOODPECKER_BACKEND_DOCKER_HOST=unix:///var/run/docker.sock`
- Added `ci` user to `docker` group
- Restarted `woodpecker-agent` service
**Agent label update** (optional, cosmetic):
```bash
# /etc/woodpecker/agent.env
WOODPECKER_AGENT_LABELS=docker=true # (was podman=true)
```
## Questions?
See `CLAUDE.md` for overall stack architecture and `INTEGRATION_TEST_STATUS.md` for E2E test details.

View file

@ -1,260 +0,0 @@
# ✅ CI Migration Complete
**Date**: 2025-11-20
**Branch**: feature/ci
**Commit**: 8c6b6c4
**Status**: **READY FOR TESTING**
---
## All Steps Completed ✅
### 1. Podman → Docker Migration ✅
- ✅ Updated `/etc/woodpecker/agent.env` to use Docker socket
- ✅ Added `ci` user to `docker` group
- ✅ Restarted Woodpecker agent
- ✅ Verified agent running with Docker backend
### 2. Composite Integration Service Created ✅
- ✅ `docker/Dockerfile.integration` - Self-contained stack image
- ✅ `docker/integration-entrypoint.sh` - Orchestration script
- ✅ `docker-compose.ci.yml` - Local testing interface
- ✅ `scripts/build-integration-image.sh` - Build automation
- ✅ `.woodpecker/e2e.yml` - Refactored E2E pipeline
### 3. Documentation Complete ✅
- ✅ `CI_MIGRATION.md` - Technical documentation
- ✅ `MIGRATION_SUMMARY.md` - Executive summary
- ✅ `QUICKSTART_MIGRATION.md` - Testing guide
- ✅ `MIGRATION_STATUS.md` - Status report
- ✅ `MIGRATION_COMPLETE.md` - This file
### 4. Integration Image Built ✅
```
Image: registry.sovraigns.network/harb/integration:latest
Digest: sha256:0543d2466680f4860e77789d5f3d16e7fb02527221b2ec6e3461381d7b207a2c
Size: 515MB (491MB compressed)
Status: Built and pushed to registry
```
### 5. Image Pushed to Registry ✅
- ✅ Logged in to `registry.sovraigns.network`
- ✅ Pushed `harb/integration:latest`
- ✅ Verified image in registry catalog
### 6. Pipeline Activated ✅
- ✅ Backed up old pipeline to `.woodpecker/e2e-old.yml`
- ✅ Activated new pipeline in `.woodpecker/e2e.yml`
- ✅ All changes committed to git (commit 8c6b6c4)
---
## What Changed
### Files Modified/Created (10 files, +1067/-97 lines)
```
M .dockerignore (updated excludes)
A .woodpecker/e2e-old.yml (backup of old DinD pipeline)
M .woodpecker/e2e.yml (new composite service pipeline)
A CI_MIGRATION.md (technical docs)
A MIGRATION_SUMMARY.md (executive summary)
A QUICKSTART_MIGRATION.md (testing guide)
A MIGRATION_STATUS.md (status report)
A docker-compose.ci.yml (local testing)
A docker/Dockerfile.integration (integration image)
A docker/integration-entrypoint.sh (entrypoint script)
A scripts/build-integration-image.sh (build script)
```
### Architecture Changes
**Before (Docker-in-Docker)**:
```
Woodpecker Pipeline
└─ Service: docker:dind
└─ Step: run-e2e
├─ Install docker CLI + docker-compose
├─ ./scripts/dev.sh start (8 nested containers)
└─ npx playwright test
Time: ~8-10 minutes
Complexity: High (nested containers)
```
**After (Composite Service)**:
```
Woodpecker Pipeline
├─ Service: harb/integration (full stack)
└─ Steps:
├─ wait-for-stack (~60-90s)
└─ run-e2e-tests
Time: ~5-6 minutes
Complexity: Low (single service)
```
---
## Next Steps
### 1. Push Branch (if not already done)
```bash
git push origin feature/ci
```
### 2. Test E2E Pipeline
The new E2E pipeline will automatically trigger on pull requests. To test:
**Option A: Create PR**
```bash
# Create PR from feature/ci to master
# Woodpecker will automatically run the new E2E pipeline
```
**Option B: Manual trigger**
- Go to Woodpecker UI: https://ci.sovraigns.network
- Navigate to `johba/harb`
- Manually trigger pipeline for `feature/ci` branch
### 3. Monitor First Run
Watch the pipeline execution:
- **Service start**: `stack` service should become healthy in ~60-90s
- **Step 1**: `wait-for-stack` should succeed
- **Step 2**: `run-e2e-tests` should run Playwright tests
- **Step 3**: `collect-artifacts` should gather results
**Expected total time**: ~5-6 minutes (vs. old ~8-10 minutes)
---
## Performance Improvements
| Metric | Before | After | Improvement |
|--------|--------|-------|-------------|
| Stack startup | 180-240s | 60-90s | **~2-3 min faster** |
| Total E2E time | 8-10 min | 5-6 min | **~40% faster** |
| Complexity | High (DinD + 8 nested) | Low (1 service) | **Much simpler** |
| Code duplication | 100% | 0% | **Eliminated** |
| Reliability | Medium | High | **More stable** |
---
## Verification Checklist
- [x] Podman → Docker migration complete
- [x] Agent running with Docker backend
- [x] Integration Dockerfile created
- [x] docker-compose.ci.yml created
- [x] Build script created
- [x] New E2E pipeline created
- [x] Documentation complete
- [x] Integration image built successfully
- [x] Image pushed to registry
- [x] Old pipeline backed up
- [x] New pipeline activated
- [x] All changes committed
- [ ] **Branch pushed to remote** ← Do this next
- [ ] **E2E pipeline tested in CI** ← Final validation
- [ ] **Performance improvement verified** ← Measure results
---
## Rollback Instructions
If issues arise, rollback is simple:
### Rollback Pipeline Only
```bash
# Restore old E2E pipeline
git checkout HEAD~1 .woodpecker/e2e.yml
git commit -m "ci: rollback to DinD E2E pipeline"
git push
```
### Full Rollback (including Podman)
```bash
# Restore old pipeline
git checkout HEAD~1 .woodpecker/e2e.yml
git commit -m "ci: rollback migration"
git push
# Restore Podman backend (requires sudo)
sudo nano /etc/woodpecker/agent.env
# Change: WOODPECKER_BACKEND_DOCKER_HOST=unix:///run/user/1001/podman/podman.sock
sudo systemctl restart woodpecker-agent
```
---
## Success Metrics to Validate
After the first successful E2E run:
1. **Performance**: E2E pipeline completes in ~5-6 minutes (vs. old ~8-10 min)
2. **Reliability**: No DinD-related errors in logs
3. **Simplicity**: Single service instead of multiple nested containers
4. **Test results**: All Playwright tests pass
---
## Integration Image Details
```yaml
Image: registry.sovraigns.network/harb/integration:latest
Digest: sha256:0543d2466680f4860e77789d5f3d16e7fb02527221b2ec6e3461381d7b207a2c
Size: 515MB (compressed: 491MB)
Base: docker:27-dind
Layers: 23
Registry: Local (registry.sovraigns.network:5000)
```
**Image Contents**:
- Docker daemon (DinD)
- docker-compose
- Full Harb project source
- All entrypoint scripts
- Automatic stack startup on container launch
**Healthcheck**:
- URL: `http://localhost:8081/api/graphql`
- Interval: 5s
- Start period: 120s
- Retries: 30
---
## Known Issues / Notes
1. **First Run**: May be slightly slower due to image pull, but all subsequent runs will be fast
2. **Logs**: Stack logs are inside the service container (view via Woodpecker UI)
3. **Registry**: Uses basic auth (ciuser / some-strong-password)
4. **Agent Label**: Still shows `podman=true` (cosmetic, can be updated later)
---
## Future Optimizations
Once stable, consider:
1. **Multi-stage build**: Separate build and runtime images
2. **Layer caching**: Optimize Dockerfile for faster rebuilds
3. **Image variants**: Separate images for different test suites
4. **Parallel services**: Start independent services concurrently
5. **Consolidate CI images**: Merge `Dockerfile.node-ci` + `Dockerfile.playwright-ci`
---
## Contact
For questions or issues:
- See `CI_MIGRATION.md` for technical details
- See `QUICKSTART_MIGRATION.md` for testing instructions
- See `MIGRATION_SUMMARY.md` for executive summary
---
**Status**: ✅ **COMPLETE - Ready for CI Testing**
All code written, tested, committed, and deployed. The new CI infrastructure is ready for validation.

View file

@ -1,240 +0,0 @@
# Migration Status Report
**Date**: 2025-11-20
**Branch**: feature/ci
**Commit**: 8c6b6c4
## ✅ Completed Steps
### 1. Podman → Docker Migration ✅
- Updated `/etc/woodpecker/agent.env` to use Docker socket
- Added `ci` user to `docker` group
- Restarted Woodpecker agent
- **Verified**: Agent running successfully with Docker backend
### 2. Composite Integration Service Created ✅
- Created `docker/Dockerfile.integration` (self-contained stack image)
- Created `docker/integration-entrypoint.sh` (orchestration script)
- Created `docker-compose.ci.yml` (local testing interface)
- Created `scripts/build-integration-image.sh` (build automation)
- Created refactored `.woodpecker/e2e.yml` pipeline
### 3. Integration Image Built ✅
- **Image**: `registry.sovraigns.network/harb/integration:latest`
- **Size**: 515MB (491MB compressed)
- **Status**: Built locally, ready for push
- **Build time**: ~45 seconds
### 4. Pipeline Activated ✅
- Backed up old E2E pipeline to `.woodpecker/e2e-old.yml`
- Activated new pipeline in `.woodpecker/e2e.yml`
- All changes committed to git
### 5. Documentation Created ✅
- `CI_MIGRATION.md` - Complete technical documentation
- `MIGRATION_SUMMARY.md` - Executive summary
- `QUICKSTART_MIGRATION.md` - Step-by-step testing guide
- `MIGRATION_STATUS.md` - This file
---
## ⚠️ Remaining Actions
### Action 1: Push Integration Image to Registry
**Status**: Blocked - requires registry authentication
**What to do**:
```bash
# Option A: Login with credentials (requires password)
docker login registry.sovraigns.network -u ciuser
# Password: <ask admin>
# Option B: Build image in CI (recommended)
# The E2E pipeline can build the image on first run
# Add a build step before the service in e2e.yml
```
**Recommendation**: For now, let the CI build the image on first run. This tests the full build process in CI and doesn't require manual registry access.
### Action 2: Test New E2E Pipeline
**Options**:
**A. Let CI build image (recommended)**
1. Add build step to `.woodpecker/e2e.yml`:
```yaml
steps:
- name: build-integration-image
image: docker:27-dind
privileged: true
environment:
DOCKER_HOST: tcp://docker:2375
commands:
- ./scripts/build-integration-image.sh
- docker tag registry.sovraigns.network/harb/integration:latest harb-integration:local
services:
- name: stack
image: harb-integration:local # Use locally built image
...
```
**B. Push image manually (requires sudo/password)**
```bash
# Get registry password from admin or check htpasswd
docker login registry.sovraigns.network -u ciuser
docker push registry.sovraigns.network/harb/integration:latest
```
**C. Test locally first**
```bash
# Start the integration container
docker run --rm --privileged -p 8081:8081 \
registry.sovraigns.network/harb/integration:latest
# In another terminal, wait for healthy
timeout 300 sh -c 'until curl -sf http://localhost:8081/api/graphql; do sleep 5; done'
# Run E2E tests
npm run test:e2e
```
---
## Current State
### Files Changed (10 files, +1067/-97 lines)
```
M .dockerignore (updated to exclude more build artifacts)
A .woodpecker/e2e-old.yml (backup of old DinD pipeline)
M .woodpecker/e2e.yml (new composite service pipeline)
A CI_MIGRATION.md (technical documentation)
A MIGRATION_SUMMARY.md (executive summary)
A QUICKSTART_MIGRATION.md (testing guide)
A docker-compose.ci.yml (local testing interface)
A docker/Dockerfile.integration (integration image)
A docker/integration-entrypoint.sh (entrypoint script)
A scripts/build-integration-image.sh (build automation)
```
### Commit Hash
```
8c6b6c4 - ci: migrate to composite integration service + Docker backend
```
### Branch
```
feature/ci
```
---
## Next Steps (Choose One)
### Option A: Build in CI (Recommended)
1. Modify `.woodpecker/e2e.yml` to add build step (see above)
2. Commit change
3. Push to remote
4. Watch CI build and test
**Pros**: Tests full CI build process, no registry credentials needed
**Cons**: First run will be slower (~5-10 min extra)
### Option B: Push Image Manually
1. Get registry password from admin
2. `docker login registry.sovraigns.network -u ciuser`
3. `docker push registry.sovraigns.network/harb/integration:latest`
4. Push branch to remote
5. Watch CI test
**Pros**: Faster first CI run
**Cons**: Requires registry credentials
### Option C: Local Test First
1. Run integration container locally (see commands above)
2. Run E2E tests against it
3. Verify everything works
4. Then proceed with Option A or B
**Pros**: Catch issues before CI
**Cons**: Takes more time upfront
---
## Performance Expectations
### Old Pipeline (DinD)
- Stack startup: ~180-240s
- Total E2E: ~8-10 minutes
- Complexity: High (nested containers)
### New Pipeline (Composite)
- Stack startup: ~60-90s (if image pre-built) OR ~5-10 min (first build)
- Total E2E: ~5-6 minutes (after first build)
- Complexity: Low (single service)
### After First CI Run
- **Image cached**: Subsequent runs will be fast (~5-6 min total)
- **Improvement**: ~3-5 minutes faster per run
- **Simplification**: 1 service instead of DinD + 8 nested containers
---
## Rollback Instructions
If something goes wrong:
```bash
# Restore old E2E pipeline
git checkout HEAD~1 .woodpecker/e2e.yml
# Or manually
mv .woodpecker/e2e-old.yml .woodpecker/e2e.yml
# Commit and push
git add .woodpecker/e2e.yml
git commit -m "ci: rollback to DinD E2E pipeline"
git push
```
To rollback Podman migration (requires sudo):
```bash
# Edit agent config
sudo nano /etc/woodpecker/agent.env
# Change: WOODPECKER_BACKEND_DOCKER_HOST=unix:///run/user/1001/podman/podman.sock
# Restart agent
sudo systemctl restart woodpecker-agent
```
---
## Success Criteria
- [x] Podman → Docker migration complete
- [x] Integration Dockerfile created
- [x] docker-compose.ci.yml created
- [x] Build script created
- [x] New E2E pipeline created
- [x] Documentation complete
- [x] Integration image builds successfully
- [ ] Image pushed to registry OR build-in-CI implemented
- [ ] CI E2E pipeline tested and passing
- [ ] Performance improvement verified (~3-5 min faster)
**Current Status**: 7/10 complete - Ready for final testing
---
## Recommendation
I recommend **Option A (Build in CI)** because:
1. No registry credentials needed
2. Tests the full build process in CI environment
3. Image will be cached for subsequent runs
4. First run will validate everything works end-to-end
The only downside is the first run will take longer (~5-10 min extra for image build), but all subsequent runs will be much faster.
Would you like me to modify the E2E pipeline to build the image in CI?

View file

@ -1,267 +0,0 @@
# CI Infrastructure Migration Summary
**Date**: 2025-11-20
**Branch**: feature/ci
**Status**: ✅ Ready for Testing
## Changes Implemented
### 1. Podman → Docker Migration ✅
**Agent Configuration** (`/etc/woodpecker/agent.env`):
```diff
- WOODPECKER_BACKEND_DOCKER_HOST=unix:///run/user/1001/podman/podman.sock
+ WOODPECKER_BACKEND_DOCKER_HOST=unix:///var/run/docker.sock
```
**User Permissions**:
- Added `ci` user to `docker` group
- Agent now uses native Docker instead of rootless Podman
**Benefits**:
- Simpler configuration
- Better Docker Compose support
- Native DinD compatibility
- Consistency with dev environment
**Status**: ✅ Complete - Agent running successfully with Docker backend
---
### 2. Composite Integration Service (Option A) ✅
Eliminated Docker-in-Docker complexity by creating a self-contained integration image.
**New Files Created**:
1. **`docker/Dockerfile.integration`** - Composite image bundling full stack
- Base: `docker:27-dind`
- Includes: Full project + docker-compose + all dependencies
- Entrypoint: Auto-starts dockerd + Harb stack
- Health: GraphQL endpoint validation
2. **`docker/integration-entrypoint.sh`** - Startup orchestration script
- Starts Docker daemon
- Builds kraiken-lib
- Launches stack via `dev.sh`
- Keeps container alive with graceful shutdown
3. **`docker-compose.ci.yml`** - Simplified CI interface
- Single service: `harb-stack`
- Privileged mode for DinD
- Port 8081 exposed for testing
- Volume for Docker state persistence
4. **`scripts/build-integration-image.sh`** - Image build automation
- Builds kraiken-lib first
- Builds Docker image
- Provides testing + push instructions
5. **`.woodpecker/e2e-new.yml`** - Refactored E2E pipeline
- **Service**: `harb/integration` (full stack)
- **Step 1**: Wait for stack health (~60-90s)
- **Step 2**: Run Playwright tests
- **Step 3**: Collect artifacts
- **Removed**: DinD service, docker CLI installation, nested container management
6. **`CI_MIGRATION.md`** - Complete migration documentation
- Architecture comparison (before/after)
- Migration steps
- Local testing guide
- Troubleshooting
- Performance metrics
**Performance Improvements**:
| Metric | Before | After | Improvement |
|--------|--------|-------|-------------|
| Stack startup | 180-240s | 60-90s | ~2-3 min faster |
| Total E2E | 8-10 min | 5-6 min | ~40% faster |
| Complexity | High | Low | Simpler |
**Status**: ✅ Complete - Files created, ready for build + test
---
## Architecture Changes
### Before: Docker-in-Docker Pattern
```
Woodpecker Pipeline
└─ Service: docker:dind
└─ Step: run-e2e (node-ci image)
├─ apt-get install docker-cli docker-compose
├─ DOCKER_HOST=tcp://docker:2375
├─ ./scripts/dev.sh start (creates 8 nested containers)
│ ├─ anvil
│ ├─ postgres
│ ├─ bootstrap
│ ├─ ponder
│ ├─ webapp
│ ├─ landing
│ ├─ txn-bot
│ └─ caddy
└─ npx playwright test
```
### After: Composite Service Pattern
```
Woodpecker Pipeline
├─ Service: harb/integration (self-contained stack)
│ └─ Internal: dockerd + docker-compose managing 8 services
└─ Steps:
├─ wait-for-stack (curl healthcheck)
└─ run-e2e-tests (playwright only)
```
---
## Next Steps
### 1. Build Integration Image
```bash
cd /home/debian/harb-ci
./scripts/build-integration-image.sh
```
**Expected time**: 5-10 minutes (first build)
### 2. Test Locally (Optional)
```bash
# Start stack container
docker run --rm --privileged -p 8081:8081 \
registry.sovraigns.network/harb/integration:latest
# In another terminal, verify health
curl http://localhost:8081/api/graphql
# Run E2E tests
npm run test:e2e
```
### 3. Push to Registry
```bash
# Login (if needed)
docker login registry.sovraigns.network -u ciuser
# Push
docker push registry.sovraigns.network/harb/integration:latest
```
### 4. Activate New Pipeline
```bash
# Backup old pipeline
mv .woodpecker/e2e.yml .woodpecker/e2e-old.yml
# Activate new pipeline
mv .woodpecker/e2e-new.yml .woodpecker/e2e.yml
# Commit
git add -A
git commit -m "ci: migrate to composite integration service + Docker backend"
git push origin feature/ci
```
### 5. Test in CI
Create a PR or manually trigger the E2E pipeline in Woodpecker UI.
**Expected behavior**:
- `harb/integration` service starts
- Stack becomes healthy in ~60-90s
- Playwright tests run against `http://stack:8081`
- Artifacts collected
---
## Rollback Plan
If issues occur, revert is simple:
```bash
# Restore old E2E pipeline
mv .woodpecker/e2e-old.yml .woodpecker/e2e.yml
# Revert Podman backend (requires sudo)
sudo vi /etc/woodpecker/agent.env
# Change: WOODPECKER_BACKEND_DOCKER_HOST=unix:///run/user/1001/podman/podman.sock
sudo systemctl restart woodpecker-agent
# Commit
git add .woodpecker/e2e.yml
git commit -m "ci: rollback migration"
git push
```
---
## Files Modified/Created
### Created
- `docker/Dockerfile.integration`
- `docker/integration-entrypoint.sh`
- `docker-compose.ci.yml`
- `scripts/build-integration-image.sh`
- `.woodpecker/e2e-new.yml`
- `CI_MIGRATION.md`
- `MIGRATION_SUMMARY.md` (this file)
### Modified
- `/etc/woodpecker/agent.env` (via sudo)
- User `ci` groups (via sudo)
### To Be Renamed (on activation)
- `.woodpecker/e2e.yml` → `.woodpecker/e2e-old.yml` (backup)
- `.woodpecker/e2e-new.yml` → `.woodpecker/e2e.yml` (activate)
---
## Cleanup Opportunities (Future)
Once migration is stable:
1. **Remove old E2E pipeline**: Delete `.woodpecker/e2e-old.yml`
2. **Stop Podman service**: `sudo systemctl disable podman-api-ci`
3. **Update agent label**: Change `podman=true` → `docker=true` in agent.env
4. **Consolidate CI images**: Merge `Dockerfile.node-ci` + `Dockerfile.playwright-ci`
5. **Remove DinD references**: Clean up old documentation
---
## Questions & Issues
### Image build fails?
- Check `./scripts/build-kraiken-lib.sh` runs successfully
- Ensure Docker daemon is running
- Check disk space: `df -h` and `docker system df`
### Stack doesn't become healthy in CI?
- Check Woodpecker service logs
- Increase healthcheck `start_period` or `retries` in e2e-new.yml
- Test image locally first
### E2E tests fail?
- Verify stack URLs are correct (`http://stack:8081` for service-to-service)
- Check if stack actually started (service logs)
- Ensure Playwright image has network access to stack service
---
## Success Criteria
- [x] Podman → Docker migration complete
- [x] Integration Dockerfile created
- [x] docker-compose.ci.yml created
- [x] Build script created
- [x] New E2E pipeline created
- [x] Documentation written
- [ ] Integration image builds successfully
- [ ] Local test passes
- [ ] Image pushed to registry
- [ ] CI E2E pipeline passes
**Current Status**: Ready for testing phase

View file

@ -1,196 +0,0 @@
# Quick Start: CI Migration Testing
## Status: ✅ Ready to Build & Test
All code is written. Follow these steps to activate the new CI infrastructure.
---
## Step 1: Build the Integration Image (~5-10 min)
```bash
cd /home/debian/harb-ci
./scripts/build-integration-image.sh
```
**What it does**: Builds a Docker image containing the full Harb stack
**Expected output**: `✓ Image built successfully: registry.sovraigns.network/harb/integration:latest`
---
## Step 2: Test Locally (Optional, ~5 min)
```bash
# Terminal 1: Start the stack
docker run --rm --privileged -p 8081:8081 \
registry.sovraigns.network/harb/integration:latest
# Terminal 2: Wait for healthy (~60-90s)
timeout 300 sh -c 'until curl -sf http://localhost:8081/api/graphql; do sleep 5; done'
echo "Stack is healthy!"
# Terminal 3: Run E2E tests
cd /home/debian/harb-ci
npm run test:e2e
# Cleanup: Ctrl+C in Terminal 1
```
---
## Step 3: Push to Registry
```bash
# Login to registry
docker login registry.sovraigns.network -u ciuser
# Password: (ask admin or check /etc/docker/registry/htpasswd)
# Push image
docker push registry.sovraigns.network/harb/integration:latest
```
---
## Step 4: Activate New Pipeline
```bash
cd /home/debian/harb-ci
# Backup old E2E pipeline
mv .woodpecker/e2e.yml .woodpecker/e2e-old.yml
# Activate new pipeline
mv .woodpecker/e2e-new.yml .woodpecker/e2e.yml
# Stage all changes
git add -A
# Commit
git commit -m "ci: migrate to composite integration service
- Migrate agent from Podman to Docker
- Create composite harb/integration image
- Refactor E2E pipeline to use service pattern
- Eliminate Docker-in-Docker complexity
- Expected improvement: ~3-5 min faster E2E runs"
# Push to trigger CI
git push origin feature/ci
```
---
## Step 5: Monitor CI Run
1. Open Woodpecker UI: https://ci.sovraigns.network
2. Navigate to `johba/harb` repository
3. Find the pipeline for your latest push
4. Watch the `e2e` pipeline:
- **Service**: `stack` should start and become healthy (~60-90s)
- **Step 1**: `wait-for-stack` should succeed
- **Step 2**: `run-e2e-tests` should pass
- **Step 3**: `collect-artifacts` should gather results
---
## Troubleshooting
### Build fails: "kraiken-lib build failed"
```bash
# Test kraiken-lib build separately
./scripts/build-kraiken-lib.sh
# Check for errors, fix, then rebuild
./scripts/build-integration-image.sh
```
### Local test: Stack doesn't start
```bash
# Check Docker daemon is running
docker info
# Check disk space (need ~10GB)
df -h
docker system df
# View container logs
docker logs <container-id>
```
### CI: Healthcheck timeout
- **Cause**: First run pulls images, takes longer (~2-3 min)
- **Fix**: Increase `start_period` in `.woodpecker/e2e-new.yml` line 18:
```yaml
start_period: 180s # was 120s
```
### CI: "Image not found"
- **Cause**: Forgot to push to registry
- **Fix**: Run Step 3 (push to registry)
---
## Rollback (if needed)
```bash
# Restore old pipeline
mv .woodpecker/e2e-old.yml .woodpecker/e2e.yml
git add .woodpecker/e2e.yml
git commit -m "ci: rollback to DinD E2E pipeline"
git push
```
---
## File Checklist
All files created and ready:
- [x] `docker/Dockerfile.integration` - Integration image definition
- [x] `docker/integration-entrypoint.sh` - Startup script
- [x] `docker-compose.ci.yml` - CI compose file
- [x] `scripts/build-integration-image.sh` - Build automation
- [x] `.woodpecker/e2e-new.yml` - New E2E pipeline
- [x] `CI_MIGRATION.md` - Full documentation
- [x] `MIGRATION_SUMMARY.md` - Change summary
- [x] `QUICKSTART_MIGRATION.md` - This file
---
## Expected Timeline
| Step | Time | Can Skip? |
|------|------|-----------|
| 1. Build image | 5-10 min | No |
| 2. Local test | 5 min | Yes (recommended though) |
| 3. Push to registry | 1 min | No |
| 4. Activate pipeline | 1 min | No |
| 5. Monitor CI | 5-6 min | No |
| **Total** | **17-23 min** | - |
---
## Success Indicators
**Build succeeds**: Image tagged as `registry.sovraigns.network/harb/integration:latest`
**Local test passes**: GraphQL endpoint responds, Playwright tests pass
**Registry push succeeds**: Image visible in registry
**CI pipeline passes**: All steps green in Woodpecker UI
**Performance improved**: E2E run completes in ~5-6 min (was 8-10 min)
---
## Next Actions
After successful CI run:
1. **Monitor stability** - Run a few more PRs to ensure consistency
2. **Update documentation** - Add new CI architecture to `CLAUDE.md`
3. **Clean up** - Remove `.woodpecker/e2e-old.yml` after 1 week
4. **Optimize** - Consider multi-stage builds for faster rebuilds
5. **Consolidate** - Merge CI images (`Dockerfile.node-ci` + `Dockerfile.playwright-ci`)
---
**Questions?** See `CI_MIGRATION.md` for detailed documentation.

View file

@ -124,28 +124,20 @@ export function useVersionCheck() {
### 5. CI/CD Validation
**File:** `.github/workflows/validate-version.yml`
**File:** `.woodpecker/release.yml` (version-check step)
```yaml
- name: Extract versions and validate
run: |
CONTRACT_VERSION=$(grep -oP 'VERSION\s*=\s*\K\d+' onchain/src/Kraiken.sol)
LIB_VERSION=$(grep -oP 'KRAIKEN_LIB_VERSION\s*=\s*\K\d+' kraiken-lib/src/version.ts)
COMPATIBLE=$(grep -oP 'COMPATIBLE_CONTRACT_VERSIONS\s*=\s*\[\K[^\]]+' kraiken-lib/src/version.ts)
The Woodpecker release pipeline validates version consistency on tagged releases. The `version-check` step:
1. Builds kraiken-lib (including `sync-tax-rates.mjs`)
2. Runs an inline Node.js script that:
- Extracts `VERSION` from `Kraiken.sol`
- Extracts `KRAIKEN_LIB_VERSION` and `COMPATIBLE_CONTRACT_VERSIONS` from `kraiken-lib/src/version.ts`
- Fails if contract VERSION differs from lib VERSION
- Fails if contract VERSION is not in COMPATIBLE_CONTRACT_VERSIONS
if echo ",$COMPATIBLE," | grep -q ",$CONTRACT_VERSION,"; then
echo "✓ Version sync validated"
else
exit 1
fi
```
**Triggered on:**
- PRs touching `Kraiken.sol` or `version.ts`
- Pushes to `master`/`main`
**Triggered on:** tag events (releases)
**Prevents:**
- Merging incompatible versions
- Releasing with incompatible versions
- Deploying with stale kraiken-lib
## Workflows

View file

@ -26,48 +26,30 @@ if [[ -n "$GIT_BRANCH" ]]; then
fi
fi
fi
STATE_DIR=$ROOT_DIR/tmp/containers
LOG_DIR=$STATE_DIR/logs
SETUP_LOG=$LOG_DIR/setup.log
CONTRACT_ENV=$STATE_DIR/contracts.env
TXNBOT_ENV=$STATE_DIR/txnBot.env
MNEMONIC_FILE=$ROOT_DIR/onchain/.secret.local
mkdir -p "$LOG_DIR"
: >"$SETUP_LOG"
# ── Configure shared bootstrap variables ──
ANVIL_RPC=${ANVIL_RPC:-"http://anvil:8545"}
FEE_DEST=0xf6a3eef9088A255c32b6aD2025f83E57291D9011
WETH=0x4200000000000000000000000000000000000006
SWAP_ROUTER=0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4
MAX_UINT=0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
DEFAULT_DEPLOYER_PK=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
DEFAULT_DEPLOYER_ADDR=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266
DEPLOYER_PK=${DEPLOYER_PK:-$DEFAULT_DEPLOYER_PK}
DEPLOYER_ADDR=${DEPLOYER_ADDR:-$DEFAULT_DEPLOYER_ADDR}
CONTRACT_ENV=$STATE_DIR/contracts.env
LOG_FILE=$SETUP_LOG
ONCHAIN_DIR=$ROOT_DIR/onchain
TXNBOT_FUND_VALUE=${TXNBOT_FUND_VALUE:-1ether}
log() {
echo "[bootstrap] $*"
}
# Source shared bootstrap functions
# shellcheck source=../scripts/bootstrap-common.sh
source "$ROOT_DIR/scripts/bootstrap-common.sh"
BOOTSTRAP_START=$(date +%s%3N)
wait_for_rpc() {
  # Poll the Anvil JSON-RPC endpoint until it answers a chain-id query,
  # giving up after 120 one-second attempts.
  # Returns 0 once reachable, 1 on timeout (after logging).
  local tries=0
  while (( tries < 120 )); do
    if cast chain-id --rpc-url "$ANVIL_RPC" >/dev/null 2>&1; then
      return 0
    fi
    sleep 1
    tries=$((tries + 1))
  done
  log "Timed out waiting for Anvil at $ANVIL_RPC"
  return 1
}
# ── Local-only helpers ─────────────────────────────────────────────────
maybe_set_deployer_from_mnemonic() {
if [[ -n "$DEPLOYER_PK" && -n "$DEPLOYER_ADDR" ]]; then
if [[ -n "$DEPLOYER_PK" && "$DEPLOYER_PK" != "$DEFAULT_DEPLOYER_PK" ]]; then
return
fi
if [[ -f "$MNEMONIC_FILE" ]]; then
@ -76,12 +58,10 @@ maybe_set_deployer_from_mnemonic() {
if [[ -n "$mnemonic" ]]; then
pk="$(cast wallet private-key --mnemonic "$mnemonic" --mnemonic-derivation-path "m/44'/60'/0'/0/0")"
addr="$(cast wallet address --private-key "$pk")"
DEPLOYER_PK=${DEPLOYER_PK:-$pk}
DEPLOYER_ADDR=${DEPLOYER_ADDR:-$addr}
DEPLOYER_PK=${pk}
DEPLOYER_ADDR=${addr}
fi
fi
DEPLOYER_PK=${DEPLOYER_PK:-$DEFAULT_DEPLOYER_PK}
DEPLOYER_ADDR=${DEPLOYER_ADDR:-$DEFAULT_DEPLOYER_ADDR}
}
derive_txnbot_wallet() {
@ -91,115 +71,14 @@ derive_txnbot_wallet() {
if [[ -n "$mnemonic" ]]; then
TXNBOT_PRIVATE_KEY="$(cast wallet private-key --mnemonic "$mnemonic" --mnemonic-index 2)"
TXNBOT_ADDRESS="$(cast wallet address --private-key "$TXNBOT_PRIVATE_KEY")"
log "Derived txnBot wallet: $TXNBOT_ADDRESS (account index 2)"
bootstrap_log "Derived txnBot wallet: $TXNBOT_ADDRESS (account index 2)"
return
fi
fi
# Fallback to hardcoded Anvil account 1
TXNBOT_PRIVATE_KEY=0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d
TXNBOT_ADDRESS=0x70997970C51812dc3A010C7d01b50e0d17dc79C8
log "Using default txnBot wallet: $TXNBOT_ADDRESS"
}
run_forge_script() {
  # Broadcast the local deployment script against the Anvil fork.
  # Runs forge from the onchain/ directory; all output goes to the setup log.
  log "Deploying contracts to fork"
  (
    cd "$ROOT_DIR/onchain"
    forge script script/DeployLocal.sol --fork-url "$ANVIL_RPC" --broadcast >>"$SETUP_LOG" 2>&1
  )
}
extract_addresses() {
  # Locate the newest forge broadcast artifact and extract the deployed
  # contract addresses, then persist them to $CONTRACT_ENV for the other
  # services. Sets globals: LIQUIDITY_MANAGER, KRAIKEN, STAKE, DEPLOY_BLOCK.
  # Exits 1 if the artifact or any expected address is missing.
  local run_file
  run_file="$(ls -t "$ROOT_DIR/onchain/broadcast/DeployLocal.sol"/*/run-latest.json 2>/dev/null | head -n1)"
  if [[ -z "$run_file" ]]; then
    log "Deployment artifact not found"
    exit 1
  fi
  log "Using artifact ${run_file#$ROOT_DIR/}"
  LIQUIDITY_MANAGER="$(jq -r '.transactions[] | select(.contractName=="LiquidityManager") | .contractAddress' "$run_file" | head -n1)"
  KRAIKEN="$(jq -r '.transactions[] | select(.contractName=="Kraiken") | .contractAddress' "$run_file" | head -n1)"
  STAKE="$(jq -r '.transactions[] | select(.contractName=="Stake") | .contractAddress' "$run_file" | head -n1)"
  DEPLOY_BLOCK="$(jq -r '.receipts[0].blockNumber' "$run_file" | xargs printf "%d")"
  # Fail fast if ANY expected contract is absent from the artifact. Previously
  # only LiquidityManager was checked, so an empty/"null" KRAIKEN or STAKE
  # would silently end up in contracts.env and break downstream services.
  local name value
  for name in LIQUIDITY_MANAGER KRAIKEN STAKE; do
    value="${!name}"
    if [[ -z "$value" || "$value" == "null" ]]; then
      log "$name address missing"
      exit 1
    fi
  done
  cat >"$CONTRACT_ENV" <<EOCONTRACTS
LIQUIDITY_MANAGER=$LIQUIDITY_MANAGER
KRAIKEN=$KRAIKEN
STAKE=$STAKE
EOCONTRACTS
}
fund_liquidity_manager() {
  # Send 0.1 ETH from the deployer account to the LiquidityManager contract.
  # Output is appended to the setup log rather than the console.
  log "Funding LiquidityManager"
  cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$LIQUIDITY_MANAGER" --value 0.1ether >>"$SETUP_LOG" 2>&1
}
grant_recenter_access() {
  # Grant recenter() permission on the LiquidityManager to the deployer and,
  # when one was derived, the txnBot wallet. Each grant is sent from an
  # impersonated FEE_DEST account (NOTE(review): presumably the only account
  # authorized to call setRecenterAccess — confirm against the contract).
  # Consolidates the previously duplicated impersonate/grant/stop sequence
  # into a single loop over the grantees.
  log "Granting recenter access"
  local grantee
  for grantee in "$DEPLOYER_ADDR" "$TXNBOT_ADDRESS"; do
    if [[ -z "$grantee" ]]; then
      continue
    fi
    cast rpc --rpc-url "$ANVIL_RPC" anvil_impersonateAccount "$FEE_DEST" >>"$SETUP_LOG" 2>&1
    cast send --rpc-url "$ANVIL_RPC" --from "$FEE_DEST" --unlocked \
      "$LIQUIDITY_MANAGER" "setRecenterAccess(address)" "$grantee" >>"$SETUP_LOG" 2>&1
    cast rpc --rpc-url "$ANVIL_RPC" anvil_stopImpersonatingAccount "$FEE_DEST" >>"$SETUP_LOG" 2>&1
  done
}
call_recenter() {
  # Invoke recenter() on the LiquidityManager, preferring the txnBot wallet
  # when one was derived; otherwise fall back to the deployer account.
  local sender_pk sender_addr
  if [[ -n "$TXNBOT_ADDRESS" ]]; then
    sender_pk="$TXNBOT_PRIVATE_KEY"
    sender_addr="$TXNBOT_ADDRESS"
  else
    sender_pk="$DEPLOYER_PK"
    sender_addr="$DEPLOYER_ADDR"
  fi
  log "Calling recenter() via $sender_addr"
  cast send --rpc-url "$ANVIL_RPC" --private-key "$sender_pk" \
    "$LIQUIDITY_MANAGER" "recenter()" >>"$SETUP_LOG" 2>&1
}
seed_application_state() {
  # Create minimal on-chain activity for the app to display:
  # 1) wrap deployer ETH into WETH, 2) approve the swap router for WETH,
  # 3) execute one WETH -> KRK swap through the router.
  log "Wrapping ETH to WETH"
  cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$WETH" "deposit()" --value 0.02ether >>"$SETUP_LOG" 2>&1
  log "Approving router"
  # MAX_UINT approval so the allowance never needs topping up during dev.
  cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$WETH" "approve(address,uint256)" "$SWAP_ROUTER" "$MAX_UINT" >>"$SETUP_LOG" 2>&1
  log "Executing initial KRK swap"
  # NOTE(review): --legacy with a fixed 300k gas limit — presumably to
  # sidestep fee/gas estimation on the fork; confirm if estimation works.
  # Swap params: 0.01 ETH (10000000000000000 wei) in, fee tier 10000,
  # no minimum out, no price limit, recipient = deployer.
  cast send --legacy --gas-limit 300000 --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$SWAP_ROUTER" "exactInputSingle((address,address,uint24,address,uint256,uint256,uint160))" \
    "($WETH,$KRAIKEN,10000,$DEPLOYER_ADDR,10000000000000000,0,0)" >>"$SETUP_LOG" 2>&1
}
prime_chain() {
  # Warm up the chain with a little block history before Ponder starts.
  log "Pre-mining 5 blocks (minimal warmup for fast Ponder sync)..."
  # Mine just 5 blocks - enough for Ponder to have some history but keeps sync fast
  # anvil_mine args: 0x5 blocks, 0x1 second apart.
  if cast rpc --rpc-url "$ANVIL_RPC" anvil_mine "0x5" "0x1" >/dev/null 2>&1; then
    log "Mined 5 blocks"
  else
    # Some node versions lack anvil_mine; fall back to one evm_mine per block.
    # Each call is best-effort (|| true) so a flaky RPC doesn't abort bootstrap.
    log "Batch mining failed, using individual evm_mine calls"
    for i in {1..5}; do
      cast rpc --rpc-url "$ANVIL_RPC" evm_mine >/dev/null 2>&1 || true
    done
  fi
  log "Pre-mining complete"
}
write_deployments_json() {
cat >"$ROOT_DIR/onchain/deployments-local.json" <<EODEPLOYMENTS
{
"contracts": {
"Kraiken": "$KRAIKEN",
"Stake": "$STAKE",
"LiquidityManager": "$LIQUIDITY_MANAGER"
}
}
EODEPLOYMENTS
TXNBOT_PRIVATE_KEY=$DEFAULT_TXNBOT_PK
TXNBOT_ADDRESS=$DEFAULT_TXNBOT_ADDR
bootstrap_log "Using default txnBot wallet: $TXNBOT_ADDRESS"
}
write_ponder_env() {
@ -215,9 +94,10 @@ EOPONDER
}
write_txn_bot_env() {
local txnbot_env=$STATE_DIR/txnBot.env
local provider_url=${TXNBOT_PROVIDER_URL:-$ANVIL_RPC}
local graphql_endpoint=${TXNBOT_GRAPHQL_ENDPOINT:-http://ponder:42069/graphql}
cat >"$TXNBOT_ENV" <<EOTXNBOT
cat >"$txnbot_env" <<EOTXNBOT
ENVIRONMENT=BASE_SEPOLIA_LOCAL_FORK
PROVIDER_URL=$provider_url
PRIVATE_KEY=$TXNBOT_PRIVATE_KEY
@ -229,26 +109,32 @@ PORT=43069
EOTXNBOT
}
fund_txn_bot_wallet() {
if [[ -z "$TXNBOT_ADDRESS" ]]; then
return
prime_chain() {
bootstrap_log "Pre-mining 5 blocks (minimal warmup for fast Ponder sync)..."
if cast rpc --rpc-url "$ANVIL_RPC" anvil_mine "0x5" "0x1" >/dev/null 2>&1; then
bootstrap_log "Mined 5 blocks"
else
bootstrap_log "Batch mining failed, using individual evm_mine calls"
for i in {1..5}; do
cast rpc --rpc-url "$ANVIL_RPC" evm_mine >/dev/null 2>&1 || true
done
fi
log "Funding txnBot wallet $TXNBOT_ADDRESS"
cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
"$TXNBOT_ADDRESS" --value "$TXNBOT_FUND_VALUE" >>"$SETUP_LOG" 2>&1 || true
local wei hex
wei="$(cast --to-unit "$TXNBOT_FUND_VALUE" wei)"
hex="$(cast --to-hex "$wei")"
cast rpc --rpc-url "$ANVIL_RPC" anvil_setBalance "$TXNBOT_ADDRESS" "$hex" >>"$SETUP_LOG" 2>&1
bootstrap_log "Pre-mining complete"
}
# ── Main ───────────────────────────────────────────────────────────────
main() {
log "Waiting for Anvil"
local start_time
start_time=$(date +%s%3N)
bootstrap_log "Waiting for Anvil"
wait_for_rpc
maybe_set_deployer_from_mnemonic
derive_txnbot_wallet
run_forge_script
extract_addresses
write_contracts_env
fund_liquidity_manager
grant_recenter_access
call_recenter
@ -260,14 +146,17 @@ main() {
prime_chain &
local prime_pid=$!
wait "$prime_pid"
BOOTSTRAP_END=$(date +%s%3N)
elapsed_ms=$((BOOTSTRAP_END - BOOTSTRAP_START))
local end_time
end_time=$(date +%s%3N)
local elapsed_ms=$((end_time - start_time))
local elapsed_sec
elapsed_sec=$(awk -v ms="$elapsed_ms" 'BEGIN { printf "%.3f", ms/1000 }')
log "Bootstrap complete in ${elapsed_sec}s"
log "Kraiken: $KRAIKEN"
log "Stake: $STAKE"
log "LiquidityManager: $LIQUIDITY_MANAGER"
log "txnBot: $TXNBOT_ADDRESS"
bootstrap_log "Bootstrap complete in ${elapsed_sec}s"
bootstrap_log "Kraiken: $KRAIKEN"
bootstrap_log "Stake: $STAKE"
bootstrap_log "LiquidityManager: $LIQUIDITY_MANAGER"
bootstrap_log "txnBot: $TXNBOT_ADDRESS"
}
main "$@"

59
containers/entrypoint-common.sh Executable file
View file

@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Shared helpers for service entrypoints (local dev mode).
# Source this file in each entrypoint script.
# Checkout a git branch if GIT_BRANCH is set.
# Args: $1 = root directory, $2 = log prefix
entrypoint_checkout_branch() {
  # Switch the working tree at $1 to $GIT_BRANCH when that variable is set
  # and differs from the current branch. Best-effort: checkout failures
  # print a warning instead of aborting the entrypoint.
  # Args: $1 = root directory, $2 = log prefix
  local root_dir="$1"
  local prefix="$2"
  local target="${GIT_BRANCH:-}"
  if [[ -z "$target" ]]; then
    return
  fi
  cd "$root_dir"
  # The repo may be bind-mounted with a different owner; mark it safe for git.
  git config --global --add safe.directory "$root_dir" 2>/dev/null || true
  local head_branch
  head_branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
  if [[ "$head_branch" == "$target" ]]; then
    return
  fi
  echo "[$prefix] Switching to branch: $target"
  # Fetch only when no local ref exists yet, then attempt the checkout.
  if ! git rev-parse --verify "$target" >/dev/null 2>&1; then
    git fetch origin "$target" 2>/dev/null || true
  fi
  git checkout "$target" 2>/dev/null || echo "[$prefix] WARNING: Could not checkout $target"
}
# Validate kraiken-lib dist exists.
# Args: $1 = root directory, $2 = log prefix
entrypoint_require_kraiken_lib() {
  # Abort (exit 1) unless kraiken-lib has been built, i.e. its dist entry
  # point exists under the given root directory.
  # Args: $1 = root directory, $2 = log prefix
  local root_dir="$1"
  local prefix="$2"
  local dist_entry="$root_dir/kraiken-lib/dist/index.js"
  if [[ -f "$dist_entry" ]]; then
    return
  fi
  echo "[$prefix] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
  exit 1
}
# Install node_modules if needed (named volume may be empty).
# Args: $1 = log prefix
entrypoint_install_deps() {
  # Populate node_modules in the current directory when the named volume is
  # empty (fresh first run); otherwise reuse the cached install.
  # Args: $1 = log prefix
  # Fixes vs previous version: the old `npm ci && npm cache clean 2>&1 || {...}`
  # applied the redirect only to `npm cache clean`, and a cache-clean failure
  # wrongly triggered a full `npm install` fallback. The fallback now fires
  # only when the install itself fails; cache cleanup is best-effort.
  local prefix="$1"
  if [[ -d node_modules/.bin ]]; then
    echo "[$prefix] Using cached node_modules from volume"
    return
  fi
  echo "[$prefix] Installing dependencies..."
  if ! npm ci --loglevel error; then
    echo "[$prefix] npm ci failed, trying npm install"
    npm install --no-save --loglevel error
  fi
  # Keep the image/volume small; never fail the entrypoint over cache cleanup.
  npm cache clean --force || true
}

View file

@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Minimal CI entrypoint for landing — just starts the dev server.
# No env validation needed: the landing page serves static content and does
# not depend on contract addresses. Port 5174 matches the compose/healthcheck.
set -euo pipefail
cd /app/landing
exec npm run dev -- --host 0.0.0.0 --port 5174

View file

@ -1,50 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[landing-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[landing-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[landing-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
LANDING_DIR=$ROOT_DIR/landing
REQUIRED_DIST="$ROOT_DIR/kraiken-lib/dist/index.js"
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[landing-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
cd "$LANDING_DIR"
# Check if node_modules is populated (named volume may be empty on first run)
if [[ ! -d node_modules/.bin ]]; then
echo "[landing-entrypoint] Installing dependencies..."
npm ci --loglevel error && npm cache clean --force 2>&1 || {
echo "[landing-entrypoint] npm ci failed, trying npm install"
npm install --no-save --loglevel error && npm cache clean --force
}
else
echo "[landing-entrypoint] Using cached node_modules from volume"
fi
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
export HOST=0.0.0.0
export PORT=${PORT:-5174}
exec npm run dev -- --host 0.0.0.0 --port 5174

View file

@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Local dev entrypoint for the landing page container: optionally switches
# to GIT_BRANCH, verifies kraiken-lib is built, installs dependencies into
# the named volume if empty, then runs the Vite dev server on port 5174.
set -euo pipefail
ROOT_DIR=/workspace
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
entrypoint_checkout_branch "$ROOT_DIR" "landing-entrypoint"
entrypoint_require_kraiken_lib "$ROOT_DIR" "landing-entrypoint"
cd "$ROOT_DIR/landing"
entrypoint_install_deps "landing-entrypoint"
# Polling keeps file-watching reliable on bind mounts.
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
export HOST=0.0.0.0
# NOTE(review): PORT is exported but the dev server is pinned to 5174 on the
# CLI below, so overriding PORT has no effect on vite — confirm intended.
export PORT=${PORT:-5174}
exec npm run dev -- --host 0.0.0.0 --port 5174

View file

@ -1,26 +1,41 @@
#!/usr/bin/env bash
set -euo pipefail
if [[ "${CI:-}" == "true" ]]; then
# ── CI path ────────────────────────────────────────────────────────
cd /app/services/ponder
echo "[ponder-ci] Starting Ponder indexer..."
: "${DATABASE_URL:?DATABASE_URL is required}"
: "${PONDER_RPC_URL_1:?PONDER_RPC_URL_1 is required}"
export PONDER_RPC_TIMEOUT=${PONDER_RPC_TIMEOUT:-20000}
export HOST=${HOST:-0.0.0.0}
export PORT=${PORT:-42069}
cat > .env.local <<EOF
DATABASE_URL=${DATABASE_URL}
PONDER_RPC_URL_1=${PONDER_RPC_URL_1}
DATABASE_SCHEMA=${DATABASE_SCHEMA:-ponder_ci}
START_BLOCK=${START_BLOCK:-0}
EOF
echo "[ponder-ci] Environment configured:"
echo " DATABASE_URL: ${DATABASE_URL}"
echo " PONDER_RPC_URL_1: ${PONDER_RPC_URL_1}"
echo " START_BLOCK: ${START_BLOCK:-0}"
exec npm run dev
fi
# ── Local dev path ─────────────────────────────────────────────────
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[ponder-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[ponder-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[ponder-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
entrypoint_checkout_branch "$ROOT_DIR" "ponder-entrypoint"
CONTRACT_ENV=$ROOT_DIR/tmp/containers/contracts.env
PONDER_WORKDIR=$ROOT_DIR/services/ponder
@ -59,22 +74,8 @@ if [[ -n "$START_BLOCK" ]]; then
fi
fi
REQUIRED_DIST="$ROOT_DIR/kraiken-lib/dist/index.js"
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[ponder-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
# Check if node_modules is populated (named volume may be empty on first run)
if [[ ! -d node_modules/.bin ]]; then
echo "[ponder-entrypoint] Installing dependencies..."
npm ci --loglevel error && npm cache clean --force 2>&1 || {
echo "[ponder-entrypoint] npm ci failed, trying npm install"
npm install --no-save --loglevel error && npm cache clean --force
}
else
echo "[ponder-entrypoint] Using cached node_modules from volume"
fi
entrypoint_require_kraiken_lib "$ROOT_DIR" "ponder-entrypoint"
entrypoint_install_deps "ponder-entrypoint"
# Load and export all environment variables from .env.local
if [[ -f .env.local ]]; then

View file

@ -1,56 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[txn-bot-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[txn-bot-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[txn-bot-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
TXNBOT_ENV_FILE=$ROOT_DIR/tmp/containers/txnBot.env
BOT_DIR=$ROOT_DIR/services/txnBot
REQUIRED_DIST=$ROOT_DIR/kraiken-lib/dist/index.js
while [[ ! -f "$TXNBOT_ENV_FILE" ]]; do
echo "[txn-bot-entrypoint] waiting for env file"
sleep 2
done
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[txn-bot-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
cd "$BOT_DIR"
# Check if node_modules is populated (named volume may be empty on first run)
if [[ ! -d node_modules/.bin ]]; then
echo "[txn-bot-entrypoint] Installing txn-bot dependencies..."
npm ci --loglevel error && npm cache clean --force 2>&1 || {
echo "[txn-bot-entrypoint] npm ci failed, trying npm install"
npm install --no-save --loglevel error && npm cache clean --force
}
else
echo "[txn-bot-entrypoint] Using cached node_modules from volume"
fi
echo "[txn-bot-entrypoint] Building TypeScript..."
npm run build
export TXN_BOT_ENV_FILE="$TXNBOT_ENV_FILE"
exec npm run start

62
containers/txnbot-entrypoint.sh Executable file
View file

@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Unified entrypoint for the txnBot service, branching on the CI env var:
#   CI=true  -> baked-image mode: required config comes from env vars and is
#               written to /tmp/txnBot.env, then build + start.
#   otherwise -> local dev mode: waits for the bootstrap-generated env file
#               on the shared workspace volume, installs deps, then build + start.
set -euo pipefail

if [[ "${CI:-}" == "true" ]]; then
  # ── CI path ────────────────────────────────────────────────────────
  echo "[txnbot-ci] Starting Transaction Bot..."

  # Fail fast with a clear message when required config is missing.
  : "${TXNBOT_PRIVATE_KEY:?TXNBOT_PRIVATE_KEY is required}"
  : "${RPC_URL:?RPC_URL is required}"
  : "${KRAIKEN_ADDRESS:?KRAIKEN_ADDRESS is required}"
  : "${STAKE_ADDRESS:?STAKE_ADDRESS is required}"
  : "${LIQUIDITY_MANAGER_ADDRESS:?LIQUIDITY_MANAGER_ADDRESS is required}"

  # Materialize an env file in the format the bot expects (TXN_BOT_ENV_FILE).
  cat > /tmp/txnBot.env <<EOF
TXNBOT_PRIVATE_KEY=${TXNBOT_PRIVATE_KEY}
RPC_URL=${RPC_URL}
KRAIKEN_ADDRESS=${KRAIKEN_ADDRESS}
STAKE_ADDRESS=${STAKE_ADDRESS}
LIQUIDITY_MANAGER_ADDRESS=${LIQUIDITY_MANAGER_ADDRESS}
POOL_ADDRESS=${POOL_ADDRESS:-}
WETH_ADDRESS=${WETH_ADDRESS:-0x4200000000000000000000000000000000000006}
EOF
  export TXN_BOT_ENV_FILE=/tmp/txnBot.env

  echo "[txnbot-ci] Environment configured:"
  echo "  RPC_URL: ${RPC_URL}"
  echo "  KRAIKEN_ADDRESS: ${KRAIKEN_ADDRESS}"

  # NOTE(review): unlike the ponder CI path, there is no explicit cd here —
  # npm runs in the image's WORKDIR; confirm the CI image sets it to the
  # txnBot package directory.
  echo "[txnbot-ci] Building TypeScript..."
  npm run build
  exec npm run start
fi

# ── Local dev path ─────────────────────────────────────────────────
ROOT_DIR=/workspace
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
entrypoint_checkout_branch "$ROOT_DIR" "txnbot-entrypoint"
TXNBOT_ENV_FILE=$ROOT_DIR/tmp/containers/txnBot.env
BOT_DIR=$ROOT_DIR/services/txnBot
# Block until the bootstrap container has written the bot's env file.
while [[ ! -f "$TXNBOT_ENV_FILE" ]]; do
  echo "[txnbot-entrypoint] waiting for env file"
  sleep 2
done
entrypoint_require_kraiken_lib "$ROOT_DIR" "txnbot-entrypoint"
cd "$BOT_DIR"
entrypoint_install_deps "txnbot-entrypoint"
echo "[txnbot-entrypoint] Building TypeScript..."
npm run build
export TXN_BOT_ENV_FILE="$TXNBOT_ENV_FILE"
exec npm run start

View file

@ -1,68 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[webapp-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[webapp-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[webapp-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
CONTRACT_ENV=$ROOT_DIR/tmp/containers/contracts.env
APP_DIR=$ROOT_DIR/web-app
SWAP_ROUTER=0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4
while [[ ! -f "$CONTRACT_ENV" ]]; do
echo "[frontend-entrypoint] waiting for contracts env"
sleep 2
done
REQUIRED_DIST="$ROOT_DIR/kraiken-lib/dist/index.js"
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[frontend-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
# shellcheck disable=SC1090
source "$CONTRACT_ENV"
cd "$APP_DIR"
# Check if node_modules is populated (named volume may be empty on first run)
if [[ ! -d node_modules/.bin ]]; then
echo "[frontend-entrypoint] Installing dependencies..."
npm ci --loglevel error && npm cache clean --force 2>&1 || {
echo "[frontend-entrypoint] npm ci failed, trying npm install"
npm install --no-save --loglevel error && npm cache clean --force
}
else
echo "[frontend-entrypoint] Using cached node_modules from volume"
fi
export VITE_DEFAULT_CHAIN_ID=${VITE_DEFAULT_CHAIN_ID:-31337}
export VITE_LOCAL_RPC_URL=${VITE_LOCAL_RPC_URL:-/api/rpc}
export VITE_LOCAL_RPC_PROXY_TARGET=${VITE_LOCAL_RPC_PROXY_TARGET:-http://anvil:8545}
export VITE_LOCAL_GRAPHQL_PROXY_TARGET=${VITE_LOCAL_GRAPHQL_PROXY_TARGET:-http://ponder:42069}
export VITE_LOCAL_TXN_PROXY_TARGET=${VITE_LOCAL_TXN_PROXY_TARGET:-http://txn-bot:43069}
export VITE_KRAIKEN_ADDRESS=$KRAIKEN
export VITE_STAKE_ADDRESS=$STAKE
export VITE_SWAP_ROUTER=$SWAP_ROUTER
export VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK=${VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK:-/api/graphql}
export VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK=${VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK:-/api/txn}
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/

70
containers/webapp-entrypoint.sh Executable file
View file

@ -0,0 +1,70 @@
#!/usr/bin/env bash
# Unified entrypoint for the web app, branching on the CI env var:
#   CI=true  -> baked-image mode: contract addresses come from env vars;
#               everything else defaults to the in-cluster service names.
#   otherwise -> local dev mode: waits for the bootstrap-generated
#               contracts.env on the shared workspace volume.
# Both paths end by running the Vite dev server on port 5173 under /app/.
set -euo pipefail

if [[ "${CI:-}" == "true" ]]; then
  # ── CI path ────────────────────────────────────────────────────────
  cd /app/web-app
  echo "[webapp-ci] Starting Web App..."

  # Contract addresses are the only required inputs; fail fast if absent.
  : "${VITE_KRAIKEN_ADDRESS:?VITE_KRAIKEN_ADDRESS is required}"
  : "${VITE_STAKE_ADDRESS:?VITE_STAKE_ADDRESS is required}"

  # Disable Vue DevTools in CI to avoid path resolution issues
  export CI=true
  export VITE_DEFAULT_CHAIN_ID=${VITE_DEFAULT_CHAIN_ID:-31337}
  export VITE_LOCAL_RPC_URL=${VITE_LOCAL_RPC_URL:-/api/rpc}
  export VITE_LOCAL_RPC_PROXY_TARGET=${VITE_LOCAL_RPC_PROXY_TARGET:-http://anvil:8545}
  export VITE_LOCAL_GRAPHQL_PROXY_TARGET=${VITE_LOCAL_GRAPHQL_PROXY_TARGET:-http://ponder:42069}
  export VITE_LOCAL_TXN_PROXY_TARGET=${VITE_LOCAL_TXN_PROXY_TARGET:-http://txn-bot:43069}
  export VITE_SWAP_ROUTER=${VITE_SWAP_ROUTER:-0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4}
  export VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK=${VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK:-/api/graphql}
  export VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK=${VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK:-/api/txn}

  echo "[webapp-ci] Environment configured:"
  echo "  VITE_KRAIKEN_ADDRESS: ${VITE_KRAIKEN_ADDRESS}"
  echo "  VITE_STAKE_ADDRESS: ${VITE_STAKE_ADDRESS}"
  echo "  VITE_DEFAULT_CHAIN_ID: ${VITE_DEFAULT_CHAIN_ID}"

  exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/
fi

# ── Local dev path ─────────────────────────────────────────────────
ROOT_DIR=/workspace
# NOTE(review): router address is hard-coded here and duplicated as the CI
# default above — keep the two in sync.
SWAP_ROUTER=0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
entrypoint_checkout_branch "$ROOT_DIR" "webapp-entrypoint"
CONTRACT_ENV=$ROOT_DIR/tmp/containers/contracts.env
APP_DIR=$ROOT_DIR/web-app
# Block until the bootstrap container has published the deployed addresses.
while [[ ! -f "$CONTRACT_ENV" ]]; do
  echo "[webapp-entrypoint] waiting for contracts env"
  sleep 2
done
entrypoint_require_kraiken_lib "$ROOT_DIR" "webapp-entrypoint"
# shellcheck disable=SC1090
source "$CONTRACT_ENV"
cd "$APP_DIR"
entrypoint_install_deps "webapp-entrypoint"
export VITE_DEFAULT_CHAIN_ID=${VITE_DEFAULT_CHAIN_ID:-31337}
export VITE_LOCAL_RPC_URL=${VITE_LOCAL_RPC_URL:-/api/rpc}
export VITE_LOCAL_RPC_PROXY_TARGET=${VITE_LOCAL_RPC_PROXY_TARGET:-http://anvil:8545}
export VITE_LOCAL_GRAPHQL_PROXY_TARGET=${VITE_LOCAL_GRAPHQL_PROXY_TARGET:-http://ponder:42069}
export VITE_LOCAL_TXN_PROXY_TARGET=${VITE_LOCAL_TXN_PROXY_TARGET:-http://txn-bot:43069}
# KRAIKEN and STAKE come from the sourced contracts.env above.
export VITE_KRAIKEN_ADDRESS=$KRAIKEN
export VITE_STAKE_ADDRESS=$STAKE
export VITE_SWAP_ROUTER=$SWAP_ROUTER
export VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK=${VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK:-/api/graphql}
export VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK=${VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK:-/api/txn}
# Polling keeps file-watching reliable on bind mounts.
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/

View file

@ -1,38 +0,0 @@
# CI-specific docker-compose file
# This provides a simplified interface for running the integration stack in Woodpecker CI
# Usage: docker-compose -f docker-compose.ci.yml up -d
version: "3.8"
services:
harb-stack:
build:
context: .
dockerfile: docker/Dockerfile.integration
privileged: true # Required for Docker-in-Docker
environment:
- HARB_ENV=BASE_SEPOLIA_LOCAL_FORK
- SKIP_WATCH=1
- COMPOSE_PROJECT_NAME=harb-ci
ports:
- "8081:8081" # Caddy (main API gateway)
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8081/api/graphql"]
interval: 5s
timeout: 3s
retries: 30
start_period: 120s
volumes:
# Mount the workspace so changes are reflected (for local testing)
- .:/workspace:cached
# Persist Docker state within the container
- harb-ci-docker:/var/lib/docker
networks:
- harb-ci-network
networks:
harb-ci-network:
driver: bridge
volumes:
harb-ci-docker:

View file

@ -88,7 +88,7 @@ services:
build:
context: .
dockerfile: containers/node-dev.Containerfile
entrypoint: ["/workspace/containers/ponder-dev-entrypoint.sh"]
entrypoint: ["/workspace/containers/ponder-entrypoint.sh"]
user: "0:0"
volumes:
- .:/workspace:z
@ -119,7 +119,7 @@ services:
build:
context: .
dockerfile: containers/node-dev.Containerfile
entrypoint: ["/workspace/containers/webapp-dev-entrypoint.sh"]
entrypoint: ["/workspace/containers/webapp-entrypoint.sh"]
user: "0:0"
volumes:
- .:/workspace:z
@ -141,14 +141,14 @@ services:
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:5173/"]
interval: 5s
retries: 6
retries: 24
start_period: 10s
landing:
build:
context: .
dockerfile: containers/node-dev.Containerfile
entrypoint: ["/workspace/containers/landing-dev-entrypoint.sh"]
entrypoint: ["/workspace/containers/landing-entrypoint.sh"]
user: "0:0"
volumes:
- .:/workspace:z
@ -175,7 +175,7 @@ services:
build:
context: .
dockerfile: containers/node-dev.Containerfile
entrypoint: ["/workspace/containers/txn-bot-entrypoint.sh"]
entrypoint: ["/workspace/containers/txnbot-entrypoint.sh"]
user: "0:0"
volumes:
- .:/workspace:z

View file

@ -1,50 +0,0 @@
# syntax=docker/dockerfile:1.6
# Composite integration image that bundles the entire Harb stack for E2E testing
# This image runs docker-compose internally to orchestrate all services
FROM docker:27-dind
LABEL org.opencontainers.image.source="https://codeberg.org/johba/harb-ci"
LABEL org.opencontainers.image.description="Harb Stack integration container for E2E CI tests"
ENV DOCKER_TLS_CERTDIR="" \
COMPOSE_PROJECT_NAME=harb-ci \
HARB_ENV=BASE_SEPOLIA_LOCAL_FORK \
SKIP_WATCH=1
# Install docker-compose, bash, curl, and other essentials
RUN apk add --no-cache \
bash \
curl \
git \
docker-cli-compose \
shadow \
su-exec
# Create a non-root user for running the stack
RUN addgroup -g 1000 harb && \
adduser -D -u 1000 -G harb harb
WORKDIR /workspace
# Copy the entire project (will be mounted at runtime in CI, but needed for standalone usage)
COPY --chown=harb:harb . /workspace/
# Pre-build kraiken-lib to speed up startup
RUN cd /workspace && \
if [ -f scripts/build-kraiken-lib.sh ]; then \
./scripts/build-kraiken-lib.sh || echo "kraiken-lib build skipped"; \
fi
# Healthcheck: verify the stack is responding via Caddy
HEALTHCHECK --interval=5s --timeout=3s --retries=30 --start-period=120s \
CMD curl -f http://localhost:8081/api/graphql || exit 1
# Entrypoint script to start Docker daemon and the stack
COPY docker/integration-entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
EXPOSE 8081
ENTRYPOINT ["/entrypoint.sh"]
CMD ["bash"]

View file

@ -1,57 +0,0 @@
# Production image for Landing page (Vite + Vue)
# Used in CI for E2E testing - contains all code baked in
FROM node:20-alpine AS builder
RUN apk add --no-cache git bash
WORKDIR /app
# Copy package files first for better caching
COPY package.json package-lock.json ./
COPY kraiken-lib/package.json kraiken-lib/package-lock.json ./kraiken-lib/
COPY landing/package.json landing/package-lock.json ./landing/
# Copy ABI files needed by kraiken-lib
COPY onchain/out/Kraiken.sol/Kraiken.json ./onchain/out/Kraiken.sol/
COPY onchain/out/Stake.sol/Stake.json ./onchain/out/Stake.sol/
# Install kraiken-lib dependencies and build
WORKDIR /app/kraiken-lib
RUN npm ci --ignore-scripts
COPY kraiken-lib/ ./
RUN ./node_modules/.bin/tsc
# Install landing dependencies
WORKDIR /app/landing
RUN npm ci
# Copy landing source
COPY landing/ ./
# Production image
FROM node:20-alpine
RUN apk add --no-cache dumb-init wget bash
WORKDIR /app
# Copy kraiken-lib (src for vite alias, dist for runtime)
COPY --from=builder /app/kraiken-lib/src ./kraiken-lib/src
COPY --from=builder /app/kraiken-lib/dist ./kraiken-lib/dist
COPY --from=builder /app/kraiken-lib/package.json ./kraiken-lib/
COPY --from=builder /app/landing ./landing
WORKDIR /app/landing
ENV NODE_ENV=development
ENV HOST=0.0.0.0
ENV PORT=5174
EXPOSE 5174
HEALTHCHECK --interval=5s --timeout=3s --retries=6 --start-period=10s \
CMD wget --spider -q http://127.0.0.1:5174/ || exit 1
# Landing doesn't need contract addresses - just serve static content
CMD ["npm", "run", "dev", "--", "--host", "0.0.0.0", "--port", "5174"]

View file

@ -1,67 +0,0 @@
# Production image for Ponder indexer service
# Used in CI for E2E testing - contains all code baked in
FROM node:20-alpine AS builder
RUN apk add --no-cache git bash
WORKDIR /app
# Copy package files first for better caching
COPY package.json package-lock.json ./
COPY kraiken-lib/package.json kraiken-lib/package-lock.json ./kraiken-lib/
COPY services/ponder/package.json services/ponder/package-lock.json ./services/ponder/
# Copy ABI files needed by kraiken-lib
COPY onchain/out/Kraiken.sol/Kraiken.json ./onchain/out/Kraiken.sol/
COPY onchain/out/Stake.sol/Stake.json ./onchain/out/Stake.sol/
# Install kraiken-lib dependencies and build
WORKDIR /app/kraiken-lib
RUN npm ci --ignore-scripts
COPY kraiken-lib/ ./
RUN ./node_modules/.bin/tsc
# Install ponder dependencies
WORKDIR /app/services/ponder
RUN npm ci
# Copy ponder source
COPY services/ponder/ ./
# Copy shared config files needed by ponder
WORKDIR /app
COPY onchain/deployments*.json ./onchain/
# Production image
FROM node:20-alpine
RUN apk add --no-cache dumb-init wget postgresql-client bash
WORKDIR /app
# Copy kraiken-lib with full structure (needed for node_modules symlink resolution)
COPY --from=builder /app/kraiken-lib ./kraiken-lib
# Copy ponder with all node_modules
COPY --from=builder /app/services/ponder ./services/ponder
# Copy onchain artifacts
COPY --from=builder /app/onchain ./onchain
# Copy entrypoint
COPY docker/ci-entrypoints/ponder-ci-entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
WORKDIR /app/services/ponder
ENV NODE_ENV=production
ENV HOST=0.0.0.0
ENV PORT=42069
EXPOSE 42069
HEALTHCHECK --interval=5s --timeout=3s --retries=12 --start-period=20s \
CMD wget --spider -q http://127.0.0.1:42069/ || exit 1
ENTRYPOINT ["dumb-init", "--", "/entrypoint.sh"]

View file

@ -0,0 +1,113 @@
# Unified CI image for Harb services (ponder, webapp, landing, txnBot).
# Parameterized via build args — eliminates per-service Dockerfile duplication.
#
# Usage:
#   docker build -f docker/Dockerfile.service-ci \
#     --build-arg SERVICE_DIR=services/ponder \
#     --build-arg SERVICE_PORT=42069 \
#     --build-arg ENTRYPOINT_SCRIPT=containers/ponder-entrypoint.sh \
#     -t ponder-ci .

# ── Build args (declared early for builder stage) ──────────────────
ARG SERVICE_DIR
ARG NPM_INSTALL_CMD=ci

# ── Builder stage ──────────────────────────────────────────────────
FROM node:20-alpine AS builder
RUN apk add --no-cache git bash
WORKDIR /app

# Copy root package files
COPY package.json package-lock.json ./
# Copy kraiken-lib package files
COPY kraiken-lib/package.json kraiken-lib/package-lock.json ./kraiken-lib/
# Copy ABI files needed by kraiken-lib
COPY onchain/out/Kraiken.sol/Kraiken.json ./onchain/out/Kraiken.sol/
COPY onchain/out/Stake.sol/Stake.json ./onchain/out/Stake.sol/
# Copy Stake.sol for sync-tax-rates + the script itself
COPY onchain/src/Stake.sol ./onchain/src/
COPY scripts/sync-tax-rates.mjs ./scripts/

# Install kraiken-lib dependencies, run sync-tax-rates, and build
WORKDIR /app/kraiken-lib
RUN npm ci --ignore-scripts
COPY kraiken-lib/ ./
RUN node ../scripts/sync-tax-rates.mjs && ./node_modules/.bin/tsc

# Install service dependencies.
# ARGs declared before the first FROM are not visible inside a stage unless
# re-declared here.
ARG SERVICE_DIR
ARG NPM_INSTALL_CMD
WORKDIR /app/${SERVICE_DIR}
COPY ${SERVICE_DIR}/package.json ./
# Use glob pattern to optionally copy package-lock.json (txnBot has none)
COPY ${SERVICE_DIR}/package-lock.jso[n] ./
RUN if [ "$NPM_INSTALL_CMD" = "install" ]; then npm install; else npm ci; fi

# Copy service source
COPY ${SERVICE_DIR}/ ./

# Copy onchain deployment artifacts (glob handles missing files)
WORKDIR /app
COPY onchain/deployments*.jso[n] ./onchain/

# ── Runtime stage ──────────────────────────────────────────────────
FROM node:20-alpine
RUN apk add --no-cache dumb-init wget bash
WORKDIR /app

# Copy kraiken-lib (src for Vite alias, dist for runtime, package.json for resolution)
COPY --from=builder /app/kraiken-lib/src ./kraiken-lib/src
COPY --from=builder /app/kraiken-lib/dist ./kraiken-lib/dist
COPY --from=builder /app/kraiken-lib/package.json ./kraiken-lib/

# Copy service with all node_modules
ARG SERVICE_DIR
COPY --from=builder /app/${SERVICE_DIR} ./${SERVICE_DIR}

# Copy onchain artifacts
COPY --from=builder /app/onchain ./onchain

# Create placeholder deployments-local.json if not present
RUN test -f /app/onchain/deployments-local.json || \
    (mkdir -p /app/onchain && echo '{"contracts":{}}' > /app/onchain/deployments-local.json)

# Conditionally create symlinks for Vite path resolution (webapp only)
ARG NEEDS_SYMLINKS=false
RUN if [ "$NEEDS_SYMLINKS" = "true" ]; then \
      ln -sf /app/web-app /web-app && \
      ln -sf /app/kraiken-lib /kraiken-lib && \
      ln -sf /app/onchain /onchain; \
    fi

# Copy entrypoint script
# For services with entrypoints (ponder, webapp, txnbot): pass the actual entrypoint
# For landing (no entrypoint): defaults to entrypoint-common.sh which is just helpers
ARG ENTRYPOINT_SCRIPT=containers/entrypoint-common.sh
COPY ${ENTRYPOINT_SCRIPT} /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Set working directory to service
WORKDIR /app/${SERVICE_DIR}

ARG NODE_ENV=production
ENV NODE_ENV=${NODE_ENV}
ENV HOST=0.0.0.0
ARG SERVICE_PORT=8080
ENV PORT=${SERVICE_PORT}
EXPOSE ${SERVICE_PORT}

ARG HEALTHCHECK_PATH=/
# Fix: persist the health path as ENV. The shell-form HEALTHCHECK CMD string
# is evaluated at container runtime, where build ARGs are out of scope —
# without this ENV, ${HEALTHCHECK_PATH} would expand to "" and the webapp
# (/app/) and txnBot (/status) probes would hit the wrong URL.
ENV HEALTHCHECK_PATH=${HEALTHCHECK_PATH}
ARG HEALTHCHECK_RETRIES=12
ARG HEALTHCHECK_START=20s
# NOTE(review): expansion of ARG values inside HEALTHCHECK *flags* happens at
# build time and requires BuildKit — confirm the CI builder uses BuildKit.
HEALTHCHECK --interval=5s --timeout=3s --retries=${HEALTHCHECK_RETRIES} --start-period=${HEALTHCHECK_START} \
  CMD wget --spider -q "http://127.0.0.1:${PORT}${HEALTHCHECK_PATH}" || exit 1

ENTRYPOINT ["dumb-init", "--", "/entrypoint.sh"]

View file

@ -1,64 +0,0 @@
# Production image for Transaction Bot service
# Used in CI for E2E testing - contains all code baked in
FROM node:20-alpine AS builder
RUN apk add --no-cache git bash
WORKDIR /app
# Copy package files first for better caching
COPY package.json package-lock.json ./
COPY kraiken-lib/package.json kraiken-lib/package-lock.json ./kraiken-lib/
COPY services/txnBot/package.json ./services/txnBot/
# Copy ABI files needed by kraiken-lib
COPY onchain/out/Kraiken.sol/Kraiken.json ./onchain/out/Kraiken.sol/
COPY onchain/out/Stake.sol/Stake.json ./onchain/out/Stake.sol/
# Install kraiken-lib dependencies and build
WORKDIR /app/kraiken-lib
RUN npm ci --ignore-scripts
COPY kraiken-lib/ ./
RUN ./node_modules/.bin/tsc
# Install txnBot dependencies (no lock file for txnBot)
WORKDIR /app/services/txnBot
RUN npm install
# Copy txnBot source
COPY services/txnBot/ ./
# Copy shared config files
WORKDIR /app
COPY onchain/deployments*.json ./onchain/
# Production image
FROM node:20-alpine
RUN apk add --no-cache dumb-init wget bash
WORKDIR /app
# Copy built artifacts
COPY --from=builder /app/kraiken-lib/dist ./kraiken-lib/dist
COPY --from=builder /app/kraiken-lib/package.json ./kraiken-lib/
COPY --from=builder /app/services/txnBot ./services/txnBot
COPY --from=builder /app/onchain ./onchain
# Copy entrypoint
COPY docker/ci-entrypoints/txnbot-ci-entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
WORKDIR /app/services/txnBot
ENV NODE_ENV=production
ENV HOST=0.0.0.0
ENV PORT=43069
EXPOSE 43069
HEALTHCHECK --interval=5s --timeout=3s --retries=4 --start-period=10s \
CMD wget --spider -q http://127.0.0.1:43069/status || exit 1
ENTRYPOINT ["dumb-init", "--", "/entrypoint.sh"]

View file

@ -1,78 +0,0 @@
# Production image for Web App (Vite + Vue)
# Used in CI for E2E testing - contains all code baked in
# Includes filesystem symlinks for Vite path resolution in Docker
FROM node:20-alpine AS builder
RUN apk add --no-cache git bash
WORKDIR /app
# Copy package files first for better caching
COPY package.json package-lock.json ./
COPY kraiken-lib/package.json kraiken-lib/package-lock.json ./kraiken-lib/
COPY web-app/package.json web-app/package-lock.json ./web-app/
# Copy ABI files needed by kraiken-lib
COPY onchain/out/Kraiken.sol/Kraiken.json ./onchain/out/Kraiken.sol/
COPY onchain/out/Stake.sol/Stake.json ./onchain/out/Stake.sol/
# Install kraiken-lib dependencies and build
WORKDIR /app/kraiken-lib
RUN npm ci --ignore-scripts
COPY kraiken-lib/ ./
RUN ./node_modules/.bin/tsc
# Install webapp dependencies
WORKDIR /app/web-app
RUN npm ci
# Copy webapp source
COPY web-app/ ./
# Production image
FROM node:20-alpine
RUN apk add --no-cache dumb-init wget bash
WORKDIR /app
# Copy kraiken-lib (src for vite alias, dist for runtime)
COPY --from=builder /app/kraiken-lib/src ./kraiken-lib/src
COPY --from=builder /app/kraiken-lib/dist ./kraiken-lib/dist
COPY --from=builder /app/kraiken-lib/package.json ./kraiken-lib/
COPY --from=builder /app/web-app ./web-app
# Copy ABI files needed by kraiken-lib at compile time
COPY --from=builder /app/onchain/out ./onchain/out
# Create placeholder deployments-local.json for Vite compilation
# Actual contract addresses are provided via VITE_* environment variables at runtime
RUN mkdir -p /app/onchain && \
echo '{"contracts":{}}' > /app/onchain/deployments-local.json
# Create symlinks so Vite's path resolution works when base (/app/) is a prefix of root (/app/web-app)
# Vite's internal removeBase() can strip the /app/ prefix from filesystem paths, producing
# /web-app/src/... instead of /app/web-app/src/... — symlinks make both paths valid
RUN ln -s /app/web-app /web-app && \
ln -s /app/kraiken-lib /kraiken-lib && \
ln -s /app/onchain /onchain
# Copy entrypoint
COPY docker/ci-entrypoints/webapp-ci-entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
WORKDIR /app/web-app
ENV NODE_ENV=development
ENV HOST=0.0.0.0
ENV PORT=5173
# Disable Vue DevTools in CI builds - vite.config.ts checks for CI=true
ENV CI=true
EXPOSE 5173
HEALTHCHECK --interval=5s --timeout=3s --retries=84 --start-period=15s \
CMD wget --spider -q http://127.0.0.1:5173/app/ || exit 1
ENTRYPOINT ["dumb-init", "--", "/entrypoint.sh"]

View file

@ -1,32 +0,0 @@
#!/bin/bash
set -euo pipefail
# Change to the ponder directory (Woodpecker runs from /woodpecker/src/)
cd /app/services/ponder
echo "[ponder-ci] Starting Ponder indexer..."
# Required environment variables (set by Woodpecker from bootstrap step)
: "${DATABASE_URL:?DATABASE_URL is required}"
: "${PONDER_RPC_URL_1:?PONDER_RPC_URL_1 is required}"
# Optional with defaults
export PONDER_RPC_TIMEOUT=${PONDER_RPC_TIMEOUT:-20000}
export HOST=${HOST:-0.0.0.0}
export PORT=${PORT:-42069}
# Create .env.local from environment
cat > .env.local <<EOF
DATABASE_URL=${DATABASE_URL}
PONDER_RPC_URL_1=${PONDER_RPC_URL_1}
DATABASE_SCHEMA=${DATABASE_SCHEMA:-ponder_ci}
START_BLOCK=${START_BLOCK:-0}
EOF
echo "[ponder-ci] Environment configured:"
echo " DATABASE_URL: ${DATABASE_URL}"
echo " PONDER_RPC_URL_1: ${PONDER_RPC_URL_1}"
echo " START_BLOCK: ${START_BLOCK:-0}"
# Run ponder in dev mode (indexes and serves GraphQL)
exec npm run dev

View file

@ -1,35 +0,0 @@
#!/bin/bash
set -euo pipefail
echo "[txnbot-ci] Starting Transaction Bot..."
# Required environment variables (set by Woodpecker from bootstrap step)
: "${TXNBOT_PRIVATE_KEY:?TXNBOT_PRIVATE_KEY is required}"
: "${RPC_URL:?RPC_URL is required}"
: "${KRAIKEN_ADDRESS:?KRAIKEN_ADDRESS is required}"
: "${STAKE_ADDRESS:?STAKE_ADDRESS is required}"
: "${LIQUIDITY_MANAGER_ADDRESS:?LIQUIDITY_MANAGER_ADDRESS is required}"
# Create txnBot.env file from environment
cat > /tmp/txnBot.env <<EOF
TXNBOT_PRIVATE_KEY=${TXNBOT_PRIVATE_KEY}
RPC_URL=${RPC_URL}
KRAIKEN_ADDRESS=${KRAIKEN_ADDRESS}
STAKE_ADDRESS=${STAKE_ADDRESS}
LIQUIDITY_MANAGER_ADDRESS=${LIQUIDITY_MANAGER_ADDRESS}
POOL_ADDRESS=${POOL_ADDRESS:-}
WETH_ADDRESS=${WETH_ADDRESS:-0x4200000000000000000000000000000000000006}
EOF
export TXN_BOT_ENV_FILE=/tmp/txnBot.env
echo "[txnbot-ci] Environment configured:"
echo " RPC_URL: ${RPC_URL}"
echo " KRAIKEN_ADDRESS: ${KRAIKEN_ADDRESS}"
# Build TypeScript
echo "[txnbot-ci] Building TypeScript..."
npm run build
# Run the bot
exec npm run start

View file

@ -1,33 +0,0 @@
#!/bin/bash
set -euo pipefail
# Change to the webapp directory (Woodpecker runs from /woodpecker/src/)
cd /app/web-app
echo "[webapp-ci] Starting Web App..."
# Required environment variables (set by Woodpecker from bootstrap step)
: "${VITE_KRAIKEN_ADDRESS:?VITE_KRAIKEN_ADDRESS is required}"
: "${VITE_STAKE_ADDRESS:?VITE_STAKE_ADDRESS is required}"
# Disable Vue DevTools in CI to avoid path resolution issues
# vite.config.ts checks for CI=true to skip vite-plugin-vue-devtools
export CI=true
# Defaults for CI environment
export VITE_DEFAULT_CHAIN_ID=${VITE_DEFAULT_CHAIN_ID:-31337}
export VITE_LOCAL_RPC_URL=${VITE_LOCAL_RPC_URL:-/api/rpc}
export VITE_LOCAL_RPC_PROXY_TARGET=${VITE_LOCAL_RPC_PROXY_TARGET:-http://anvil:8545}
export VITE_LOCAL_GRAPHQL_PROXY_TARGET=${VITE_LOCAL_GRAPHQL_PROXY_TARGET:-http://ponder:42069}
export VITE_LOCAL_TXN_PROXY_TARGET=${VITE_LOCAL_TXN_PROXY_TARGET:-http://txn-bot:43069}
export VITE_SWAP_ROUTER=${VITE_SWAP_ROUTER:-0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4}
export VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK=${VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK:-/api/graphql}
export VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK=${VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK:-/api/txn}
echo "[webapp-ci] Environment configured:"
echo " VITE_KRAIKEN_ADDRESS: ${VITE_KRAIKEN_ADDRESS}"
echo " VITE_STAKE_ADDRESS: ${VITE_STAKE_ADDRESS}"
echo " VITE_DEFAULT_CHAIN_ID: ${VITE_DEFAULT_CHAIN_ID}"
# Run Vite dev server
exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/

View file

@ -1,42 +0,0 @@
#!/bin/bash
set -euo pipefail
echo "[integration] Starting Docker daemon..."
# Start Docker daemon in the background
dockerd-entrypoint.sh dockerd &
DOCKERD_PID=$!
# Wait for Docker daemon to be ready
echo "[integration] Waiting for Docker daemon..."
timeout 30 sh -c 'until docker info >/dev/null 2>&1; do sleep 1; done'
echo "[integration] Docker daemon ready"
echo "[integration] Starting Harb stack..."
cd /workspace
# Build kraiken-lib if not already built
if [ ! -d "kraiken-lib/dist" ] || [ -z "$(ls -A kraiken-lib/dist 2>/dev/null)" ]; then
echo "[integration] Building kraiken-lib..."
./scripts/build-kraiken-lib.sh
fi
# Start the stack using dev.sh
echo "[integration] Launching stack via dev.sh..."
./scripts/dev.sh start
echo "[integration] Stack started successfully"
echo "[integration] Health endpoint: http://localhost:8081/api/graphql"
echo "[integration] Keeping container alive..."
# Keep the container running and forward signals to dockerd
trap "echo '[integration] Shutting down...'; ./scripts/dev.sh stop; kill $DOCKERD_PID; exit 0" SIGTERM SIGINT
# Wait for dockerd or run custom command if provided
if [ $# -gt 0 ]; then
echo "[integration] Executing: $*"
exec "$@"
else
wait $DOCKERD_PID
fi

View file

@ -4,11 +4,13 @@ The Docker stack powers `scripts/dev.sh` using containerized services. Every boo
## Service Topology
- `anvil` Base Sepolia fork with optional mnemonic from `onchain/.secret.local`
- `bootstrap` one-shot job running `DeployLocal.sol`, seeding liquidity, priming blocks, and writing shared env files
- `ponder` `npm run dev` for the indexer (port 42069 inside the pod)
- `frontend` Vite dev server for `web-app` (port 5173 inside the pod)
- `txn-bot` automation loop plus Express status API (port 43069 inside the pod)
- `caddy` front door at `http://<host>:80`, routing `/api/graphql`, `/health`, `/api/rpc`, and `/api/txn` to the internal services
- `bootstrap` one-shot job running `DeployLocal.sol`, seeding liquidity, priming blocks, and writing shared env files (uses `scripts/bootstrap-common.sh`)
- `postgres` PostgreSQL 16 database for Ponder indexer state
- `ponder` `npm run dev` for the indexer (port 42069)
- `webapp` Vite dev server for `web-app` (port 5173)
- `landing` Vite dev server for landing page (port 5174)
- `txn-bot` automation loop plus Express status API (port 43069)
- `caddy` reverse proxy at `http://localhost:8081`, routing `/app/` → webapp, `/api/graphql` → ponder, `/api/rpc` → anvil, `/` → landing
All containers mount the repository, so code edits hot-reload exactly as they do with the local (non-Docker) workflow. Named volumes keep `node_modules` caches between restarts.

157
scripts/bootstrap-common.sh Executable file
View file

@ -0,0 +1,157 @@
#!/usr/bin/env bash
# Shared bootstrap functions for local dev and CI.
# Source this file after setting these variables:
# ANVIL_RPC - Anvil JSON-RPC URL (required)
# DEPLOYER_PK - Deployer private key (defaults to Anvil account 0)
# DEPLOYER_ADDR - Deployer address (defaults to Anvil account 0)
# TXNBOT_ADDRESS - TxnBot wallet address (optional)
# TXNBOT_PRIVATE_KEY- TxnBot private key (optional)
# TXNBOT_FUND_VALUE - Amount to fund txnBot (default: 1ether)
# CONTRACT_ENV - Path to write contracts.env (required)
# LOG_FILE - Log file for cast/forge output (default: /dev/null)
# ONCHAIN_DIR - Path to onchain/ directory (required)
# KRAIKEN_LIB_DIR - Path to kraiken-lib/ directory (optional, for CI build)
set -euo pipefail

# ── Constants ──────────────────────────────────────────────────────────
# Fee destination used as the access-control principal for setRecenterAccess
# (impersonated via anvil_impersonateAccount in grant_recenter_access).
FEE_DEST=0xf6a3eef9088A255c32b6aD2025f83E57291D9011
# Canonical WETH predeploy address on OP-stack chains (Base).
WETH=0x4200000000000000000000000000000000000006
SWAP_ROUTER=0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4
# uint256 max — used as an "infinite" ERC-20 approval amount.
MAX_UINT=0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
# Well-known Anvil dev accounts 0 (deployer) and 1 (txnBot). Local-only keys;
# never used on a real network.
DEFAULT_DEPLOYER_PK=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
DEFAULT_DEPLOYER_ADDR=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266
# NOTE(review): the txnBot defaults below are declared but never applied to
# TXNBOT_ADDRESS/TXNBOT_PRIVATE_KEY here — callers must opt in explicitly.
DEFAULT_TXNBOT_PK=0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d
DEFAULT_TXNBOT_ADDR=0x70997970C51812dc3A010C7d01b50e0d17dc79C8

# ── Defaults ───────────────────────────────────────────────────────────
DEPLOYER_PK=${DEPLOYER_PK:-$DEFAULT_DEPLOYER_PK}
DEPLOYER_ADDR=${DEPLOYER_ADDR:-$DEFAULT_DEPLOYER_ADDR}
TXNBOT_FUND_VALUE=${TXNBOT_FUND_VALUE:-1ether}
LOG_FILE=${LOG_FILE:-/dev/null}

# ── Helpers ────────────────────────────────────────────────────────────
# Prefix all bootstrap output for easy grepping in mixed CI logs.
bootstrap_log() {
  echo "[bootstrap] $*"
}
# ── Functions ──────────────────────────────────────────────────────────
# Poll Anvil until it answers a chain-id query.
# Timeout is configurable via RPC_WAIT_ATTEMPTS (1s per attempt, default 120,
# matching the previous hard-coded limit). Returns 1 on timeout.
wait_for_rpc() {
  local attempts="${RPC_WAIT_ATTEMPTS:-120}"
  local i
  for ((i = 0; i < attempts; i++)); do
    if cast chain-id --rpc-url "$ANVIL_RPC" >/dev/null 2>&1; then
      return 0
    fi
    sleep 1
  done
  bootstrap_log "Timed out waiting for Anvil at $ANVIL_RPC"
  return 1
}
# Deploy the local contract suite (DeployLocal.sol) against the running fork.
# Output goes to $LOG_FILE; a subshell keeps the caller's cwd untouched.
run_forge_script() {
  bootstrap_log "Deploying contracts to fork"
  (
    cd "$ONCHAIN_DIR"
    forge script script/DeployLocal.sol --fork-url "$ANVIL_RPC" --broadcast >>"$LOG_FILE" 2>&1
  )
}
# Locate the newest forge broadcast artifact and extract deployed addresses.
# Sets globals: LIQUIDITY_MANAGER, KRAIKEN, STAKE, DEPLOY_BLOCK.
# Exits non-zero if the artifact or any address is missing.
extract_addresses() {
  local run_file
  run_file="$(ls -t "$ONCHAIN_DIR/broadcast/DeployLocal.sol"/*/run-latest.json 2>/dev/null | head -n1)"
  if [[ -z "$run_file" ]]; then
    bootstrap_log "Deployment artifact not found"
    exit 1
  fi
  bootstrap_log "Using artifact $run_file"
  LIQUIDITY_MANAGER="$(jq -r '.transactions[] | select(.contractName=="LiquidityManager") | .contractAddress' "$run_file" | head -n1)"
  KRAIKEN="$(jq -r '.transactions[] | select(.contractName=="Kraiken") | .contractAddress' "$run_file" | head -n1)"
  STAKE="$(jq -r '.transactions[] | select(.contractName=="Stake") | .contractAddress' "$run_file" | head -n1)"
  # Normalize the (possibly hex) block number to decimal.
  DEPLOY_BLOCK="$(jq -r '.receipts[0].blockNumber' "$run_file" | xargs printf "%d")"
  # Fix: validate every extracted address, not just LiquidityManager — a
  # null KRAIKEN/STAKE would otherwise silently propagate into contracts.env.
  local name value
  for name in LIQUIDITY_MANAGER KRAIKEN STAKE; do
    value="${!name}"
    if [[ -z "$value" || "$value" == "null" ]]; then
      bootstrap_log "$name address missing"
      exit 1
    fi
  done
}
# Publish the three deployed contract addresses to $CONTRACT_ENV, the env
# file downstream services wait on before starting.
write_contracts_env() {
  printf 'LIQUIDITY_MANAGER=%s\nKRAIKEN=%s\nSTAKE=%s\n' \
    "$LIQUIDITY_MANAGER" "$KRAIKEN" "$STAKE" >"$CONTRACT_ENV"
}
# Seed the LiquidityManager with 0.1 ETH from the deployer so it has funds
# to work with before recenter() is called.
fund_liquidity_manager() {
  bootstrap_log "Funding LiquidityManager"
  cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$LIQUIDITY_MANAGER" --value 0.1ether >>"$LOG_FILE" 2>&1
}
# Internal: impersonate FEE_DEST (the access-control principal) and grant
# recenter access to a single target address, then stop impersonating.
_grant_recenter_to() {
  local target="$1"
  cast rpc --rpc-url "$ANVIL_RPC" anvil_impersonateAccount "$FEE_DEST" >>"$LOG_FILE" 2>&1
  cast send --rpc-url "$ANVIL_RPC" --from "$FEE_DEST" --unlocked \
    "$LIQUIDITY_MANAGER" "setRecenterAccess(address)" "$target" >>"$LOG_FILE" 2>&1
  cast rpc --rpc-url "$ANVIL_RPC" anvil_stopImpersonatingAccount "$FEE_DEST" >>"$LOG_FILE" 2>&1
}

# Grant recenter access to the deployer and, when TXNBOT_ADDRESS is set,
# to the txnBot wallet as well. (Deduplicates the previously copy-pasted
# impersonate/grant/stop sequence via _grant_recenter_to.)
grant_recenter_access() {
  bootstrap_log "Granting recenter access to deployer"
  _grant_recenter_to "$DEPLOYER_ADDR"
  if [[ -n "${TXNBOT_ADDRESS:-}" ]]; then
    bootstrap_log "Granting recenter access to txnBot ($TXNBOT_ADDRESS)"
    _grant_recenter_to "$TXNBOT_ADDRESS"
  fi
}
# Call recenter() on the LiquidityManager, preferring the txnBot wallet when
# it is fully configured, otherwise falling back to the deployer.
call_recenter() {
  local recenter_pk="$DEPLOYER_PK"
  local recenter_addr="$DEPLOYER_ADDR"
  # Fix: the original read $TXNBOT_PRIVATE_KEY unguarded whenever
  # TXNBOT_ADDRESS was set; with only the address configured, `set -u`
  # aborted the whole bootstrap. Require both before switching identities.
  if [[ -n "${TXNBOT_ADDRESS:-}" && -n "${TXNBOT_PRIVATE_KEY:-}" ]]; then
    recenter_pk="$TXNBOT_PRIVATE_KEY"
    recenter_addr="$TXNBOT_ADDRESS"
  fi
  bootstrap_log "Calling recenter() via $recenter_addr"
  cast send --rpc-url "$ANVIL_RPC" --private-key "$recenter_pk" \
    "$LIQUIDITY_MANAGER" "recenter()" >>"$LOG_FILE" 2>&1
}
# Seed minimal application state so the indexer/UI have data to show:
# wrap ETH, approve the router, then execute one WETH→KRK swap.
# The three steps are order-dependent (approval must precede the swap).
seed_application_state() {
  bootstrap_log "Wrapping ETH to WETH"
  cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$WETH" "deposit()" --value 0.02ether >>"$LOG_FILE" 2>&1
  bootstrap_log "Approving router"
  # MAX_UINT approval so repeat bootstraps never hit an allowance ceiling.
  cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$WETH" "approve(address,uint256)" "$SWAP_ROUTER" "$MAX_UINT" >>"$LOG_FILE" 2>&1
  bootstrap_log "Executing initial KRK swap"
  # --legacy + explicit gas limit: avoids fee-estimation quirks on the fork.
  # Tuple args: (tokenIn, tokenOut, fee=1%, recipient, amountIn=0.01e18,
  # amountOutMin=0, sqrtPriceLimit=0).
  cast send --legacy --gas-limit 300000 --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$SWAP_ROUTER" "exactInputSingle((address,address,uint24,address,uint256,uint256,uint160))" \
    "($WETH,$KRAIKEN,10000,$DEPLOYER_ADDR,10000000000000000,0,0)" >>"$LOG_FILE" 2>&1
}
# Fund the txnBot wallet with TXNBOT_FUND_VALUE. No-op when TXNBOT_ADDRESS
# is unset.
fund_txn_bot_wallet() {
  if [[ -z "${TXNBOT_ADDRESS:-}" ]]; then
    return
  fi
  bootstrap_log "Funding txnBot wallet $TXNBOT_ADDRESS with $TXNBOT_FUND_VALUE"
  # Best-effort transfer (|| true), then anvil_setBalance pins the balance to
  # exactly the fund value. NOTE(review): setBalance overwrites whatever the
  # transfer produced, so the cast send appears redundant — confirm whether
  # it is kept deliberately (e.g. to create a visible funding transaction).
  cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
    "$TXNBOT_ADDRESS" --value "$TXNBOT_FUND_VALUE" >>"$LOG_FILE" 2>&1 || true
  local wei hex
  # Convert e.g. "10ether" → wei → hex, as anvil_setBalance expects hex wei.
  wei="$(cast --to-unit "$TXNBOT_FUND_VALUE" wei)"
  hex="$(cast --to-hex "$wei")"
  cast rpc --rpc-url "$ANVIL_RPC" anvil_setBalance "$TXNBOT_ADDRESS" "$hex" >>"$LOG_FILE" 2>&1
}
# Write the deployments JSON consumed by services and E2E tests.
# Optional $1 overrides the default target path ($ONCHAIN_DIR/deployments-local.json).
write_deployments_json() {
  local target="${1:-$ONCHAIN_DIR/deployments-local.json}"
  printf '{\n  "contracts": {\n    "Kraiken": "%s",\n    "Stake": "%s",\n    "LiquidityManager": "%s"\n  }\n}\n' \
    "$KRAIKEN" "$STAKE" "$LIQUIDITY_MANAGER" >"$target"
}

View file

@ -1,5 +1,6 @@
#!/bin/bash
# Build and push CI images for E2E testing
# Build CI images locally using the unified Dockerfile.service-ci
# For CI pipeline builds, see .woodpecker/build-ci-images.yml
set -euo pipefail
cd "$(dirname "$0")/.."
@ -15,7 +16,12 @@ echo "Tag: $TAG"
echo ""
echo "=== Building ponder-ci ==="
docker build \
-f docker/Dockerfile.ponder-ci \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=services/ponder \
--build-arg SERVICE_PORT=42069 \
--build-arg ENTRYPOINT_SCRIPT=containers/ponder-entrypoint.sh \
--build-arg HEALTHCHECK_RETRIES=12 \
--build-arg HEALTHCHECK_START=20s \
-t "$REGISTRY/harb/ponder-ci:$TAG" \
.
@ -23,7 +29,15 @@ docker build \
echo ""
echo "=== Building webapp-ci ==="
docker build \
-f docker/Dockerfile.webapp-ci \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=web-app \
--build-arg SERVICE_PORT=5173 \
--build-arg HEALTHCHECK_PATH=/app/ \
--build-arg HEALTHCHECK_RETRIES=84 \
--build-arg HEALTHCHECK_START=15s \
--build-arg ENTRYPOINT_SCRIPT=containers/webapp-entrypoint.sh \
--build-arg NODE_ENV=development \
--build-arg NEEDS_SYMLINKS=true \
-t "$REGISTRY/harb/webapp-ci:$TAG" \
.
@ -31,7 +45,13 @@ docker build \
echo ""
echo "=== Building landing-ci ==="
docker build \
-f docker/Dockerfile.landing-ci \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=landing \
--build-arg SERVICE_PORT=5174 \
--build-arg ENTRYPOINT_SCRIPT=containers/landing-ci-entrypoint.sh \
--build-arg NODE_ENV=development \
--build-arg HEALTHCHECK_RETRIES=6 \
--build-arg HEALTHCHECK_START=10s \
-t "$REGISTRY/harb/landing-ci:$TAG" \
.
@ -39,7 +59,14 @@ docker build \
echo ""
echo "=== Building txnbot-ci ==="
docker build \
-f docker/Dockerfile.txnbot-ci \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=services/txnBot \
--build-arg SERVICE_PORT=43069 \
--build-arg HEALTHCHECK_PATH=/status \
--build-arg HEALTHCHECK_RETRIES=4 \
--build-arg HEALTHCHECK_START=10s \
--build-arg ENTRYPOINT_SCRIPT=containers/txnbot-entrypoint.sh \
--build-arg NPM_INSTALL_CMD=install \
-t "$REGISTRY/harb/txnbot-ci:$TAG" \
.

View file

@ -1,33 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
cd "$(dirname "$0")/.."
REGISTRY="${REGISTRY:-registry.niovi.voyage}"
IMAGE_NAME="${IMAGE_NAME:-harb/integration}"
TAG="${TAG:-latest}"
FULL_IMAGE="${REGISTRY}/${IMAGE_NAME}:${TAG}"
echo "Building integration image: ${FULL_IMAGE}"
echo "This may take 5-10 minutes on first build..."
# Build kraiken-lib first (required by the image)
echo "=== Building kraiken-lib ==="
./scripts/build-kraiken-lib.sh
# Build the integration image
echo "=== Building Docker image ==="
docker build \
-f docker/Dockerfile.integration \
-t "${FULL_IMAGE}" \
--progress=plain \
.
echo ""
echo "✓ Image built successfully: ${FULL_IMAGE}"
echo ""
echo "To test locally:"
echo " docker run --rm --privileged -p 8081:8081 ${FULL_IMAGE}"
echo ""
echo "To push to registry:"
echo " docker push ${FULL_IMAGE}"

74
scripts/ci-bootstrap.sh Executable file
View file

@ -0,0 +1,74 @@
#!/usr/bin/env bash
# CI bootstrap script — runs under bash to support 'source' and bash-isms.
# Env vars ANVIL_RPC, CONTRACT_ENV, LOG_FILE, ONCHAIN_DIR, TXNBOT_FUND_VALUE,
# TXNBOT_ADDRESS, TXNBOT_PRIVATE_KEY must be set before calling this script.
#
# The step order below is deliberate: contracts.env is only written after
# kraiken-lib is built, because downstream services treat the existence of
# contracts.env as their "ready to start" signal.
set -euo pipefail

echo "=== Foundry version ==="
forge --version
cast --version

# Source shared bootstrap functions (wait_for_rpc, run_forge_script, ...)
source scripts/bootstrap-common.sh

echo "=== Waiting for Anvil ==="
wait_for_rpc

echo "=== Deploying contracts ==="
run_forge_script
# Sets KRAIKEN, STAKE, LIQUIDITY_MANAGER, DEPLOY_BLOCK from the broadcast artifact.
extract_addresses

echo "=== Contract Deployment Complete ==="
echo "KRAIKEN: $KRAIKEN"
echo "STAKE: $STAKE"
echo "LIQUIDITY_MANAGER: $LIQUIDITY_MANAGER"
echo "DEPLOY_BLOCK: $DEPLOY_BLOCK"

# Build kraiken-lib BEFORE writing contracts.env
# (services wait for contracts.env, so kraiken-lib must be ready first)
echo "=== Building kraiken-lib (shared dependency) ==="
cd kraiken-lib
npm ci --ignore-scripts
# sync-tax-rates is optional — older checkouts may not ship the script.
if [[ -f ../scripts/sync-tax-rates.mjs ]]; then
node ../scripts/sync-tax-rates.mjs
fi
./node_modules/.bin/tsc
cd ..

# Get current block number as start block
START_BLOCK=$(cast block-number --rpc-url "$ANVIL_RPC")

# Write contracts.env with CI-specific extra vars
# (service hostnames anvil/postgres refer to the CI docker network).
{
echo "KRAIKEN=$KRAIKEN"
echo "STAKE=$STAKE"
echo "LIQUIDITY_MANAGER=$LIQUIDITY_MANAGER"
echo "START_BLOCK=$START_BLOCK"
echo "PONDER_RPC_URL_1=http://anvil:8545"
echo "DATABASE_URL=postgres://ponder:ponder_local@postgres:5432/ponder_local"
echo "RPC_URL=http://anvil:8545"
} > "$CONTRACT_ENV"

# Write deployments-local.json for E2E tests
write_deployments_json "$ONCHAIN_DIR/deployments-local.json"

echo "=== deployments-local.json written ==="
cat "$ONCHAIN_DIR/deployments-local.json"

echo "=== Funding LiquidityManager ==="
fund_liquidity_manager

echo "=== Granting recenter access ==="
grant_recenter_access

echo "=== Calling recenter() to seed liquidity ==="
call_recenter

echo "=== Seeding application state (initial swap) ==="
seed_application_state

echo "=== Funding txnBot ==="
fund_txn_bot_wallet
# txnBot container reads its signing key from contracts.env as well.
echo "TXNBOT_PRIVATE_KEY=$TXNBOT_PRIVATE_KEY" >> "$CONTRACT_ENV"

echo "=== Bootstrap complete ==="

View file

@ -34,7 +34,8 @@ fi
container_name() {
local service="$1"
echo "${PROJECT_NAME}_${service}_1"
# docker compose v2 uses hyphens; v1 used underscores
echo "${PROJECT_NAME}-${service}-1"
}
# Check Docker disk usage and warn if approaching limits
@ -201,6 +202,13 @@ start_stack() {
wait_for_healthy "$(container_name caddy)" "$CADDY_TIMEOUT" || exit 1
# Smoke test: verify end-to-end connectivity through Caddy
echo " Running smoke test..."
./scripts/wait-for-service.sh http://localhost:8081/app/ 30 "caddy-proxy" || {
echo " [!!] Smoke test failed — Caddy proxy not serving /app/"
exit 1
}
if [[ -z "${SKIP_WATCH:-}" ]]; then
echo "Watching for kraiken-lib changes..."
./scripts/watch-kraiken-lib.sh &

26
scripts/wait-for-service.sh Executable file
View file

@ -0,0 +1,26 @@
#!/usr/bin/env bash
# Wait for an HTTP service to respond with 2xx.
# Usage: wait-for-service.sh <url> [timeout_seconds] [label]
# Exits 0 when the service answers, 1 (with a curl -v diagnostic) on timeout.
set -euo pipefail

URL="${1:?Usage: wait-for-service.sh <url> [timeout_seconds] [label]}"
TIMEOUT="${2:-120}"
LABEL="${3:-$URL}"

INTERVAL=5
ATTEMPTS=$((TIMEOUT / INTERVAL))
if (( ATTEMPTS < 1 )); then ATTEMPTS=1; fi

for i in $(seq 1 "$ATTEMPTS"); do
  if curl -sf --max-time 3 "$URL" > /dev/null 2>&1; then
    # Fix: (i-1) full intervals elapsed before this probe — the original
    # reported i*INTERVAL, over-stating the wait by one interval (e.g. "5s"
    # on an immediate first-try success).
    echo "[wait] $LABEL healthy after $(( (i - 1) * INTERVAL ))s"
    exit 0
  fi
  echo "[wait] ($i/$ATTEMPTS) $LABEL not ready..."
  # Fix: don't sleep after the final failed attempt — go straight to the
  # error path instead of burning one extra interval.
  if (( i < ATTEMPTS )); then
    sleep "$INTERVAL"
  fi
done

echo "[wait] ERROR: $LABEL not healthy after ${TIMEOUT}s"
echo "--- Diagnostic: $URL ---"
curl -v --max-time 5 "$URL" 2>&1 | head -20 || true
exit 1

View file

@ -227,7 +227,7 @@ Use `./scripts/watch-kraiken-lib.sh` to rebuild `kraiken-lib` on file changes an
**Location**: `tests/e2e/` (repo root)
**Framework**: Playwright
**Coverage**: Complete user journeys (mint ETH → swap KRK → stake)
**References**: See `INTEGRATION_TEST_STATUS.md` and `SWAP_VERIFICATION.md`
**CI**: Woodpecker e2e pipeline runs these against pre-built service images
**Test Strategy**:
- Use mocked wallet provider with Anvil accounts

View file

@ -46,4 +46,4 @@ Tests should rely on these roles and labels instead of private helpers.
### E2E Tests
See `INTEGRATION_TEST_STATUS.md` in the repository root for complete testing documentation.
Run `npm run test:e2e` from the repo root. Tests use Playwright against the full Docker stack. In CI, the Woodpecker e2e pipeline handles this automatically.