Compare commits

..

23 commits

Author SHA1 Message Date
openhands
ce87847f4e web-app: move env declaration into src and guard pipefail 2025-10-13 19:05:15 +00:00
openhands
d2c1f9c84f ci: switch npm ci to npm install for workspace deps 2025-10-13 18:20:47 +00:00
openhands
ea0d35486b ci: export foundry path in runner scripts 2025-10-13 18:15:13 +00:00
openhands
d351acde78 ci: pin node/playwright images to ci-20251013 tag 2025-10-13 18:12:14 +00:00
openhands
d0ebb0ecf1 ci: adjust yarn --cwd invocation 2025-10-13 18:03:14 +00:00
openhands
953bec25c5 ci: prebuild node/playwright images and switch woodpecker 2025-10-13 17:01:51 +00:00
openhands
76e197a21b Fix npm workspaces in CI 2025-10-12 19:54:02 +00:00
openhands
ea0226179c Drop forge size check in CI 2025-10-12 19:27:23 +00:00
openhands
7aba3bb87e Disable bytecode limit for maxperf profile 2025-10-12 19:23:13 +00:00
openhands
f7e0b4fd24 Disable Foundry bytecode size enforcement for tests 2025-10-12 19:13:42 +00:00
openhands
861bad5b00 Raise local bytecode limit for test helpers 2025-10-12 17:44:53 +00:00
openhands
10f011f136 Install uni-v3-lib dependencies before Foundry runs 2025-10-12 17:30:29 +00:00
openhands
9637fe6df1 Use Foundry prebuilt image for solidity workflows 2025-10-12 17:20:01 +00:00
openhands
91f19539d9 Run foundry suite before node lint 2025-10-12 17:07:47 +00:00
openhands
c2f1690b0d Preserve multiline bash scripts in pipelines 2025-10-12 16:59:55 +00:00
openhands
3915e121bd Wrap docker steps in bash shells 2025-10-12 16:56:42 +00:00
openhands
06b5f6302c Tune release workflow filters 2025-10-12 16:52:44 +00:00
openhands
12f322d441 Run release workflow in containerized steps 2025-10-12 16:28:39 +00:00
openhands
31728401ce Remove shell brace expansions in release workflow 2025-10-12 16:26:33 +00:00
openhands
f5617ad9bf Scope PATH setup to e2e step 2025-10-12 16:18:58 +00:00
openhands
3195ab8725 Use simple runtime dir expansion in e2e workflow 2025-10-12 16:17:46 +00:00
openhands
66cfaed355 Refine Woodpecker e2e workflow 2025-10-12 16:11:17 +00:00
openhands
c8180a30f3 Add Woodpecker pipelines 2025-10-12 15:59:11 +00:00
698 changed files with 12650 additions and 105633 deletions

View file

@ -1,26 +0,0 @@
# Claude Code Supervisor configuration
# Copy to ~/.config/claude-code-supervisor/config.yml
# or .claude-code-supervisor.yml in your project root.
triage:
# Command that accepts a prompt on stdin and returns text on stdout.
# Default: claude -p (uses Claude Code's own auth)
command: "claude -p --no-session-persistence"
model: "claude-haiku-4-5-20251001"
max_tokens: 150
notify:
# Command that receives a JSON string as its last argument.
# Called when triage determines action is needed.
# Examples:
# openclaw: openclaw gateway call wake --params
# ntfy: curl -s -X POST https://ntfy.sh/my-topic -d
# webhook: curl -s -X POST https://example.com/hook -H 'Content-Type: application/json' -d
# script: /path/to/my-notify.sh
command: "openclaw gateway call wake --params"
# Quiet hours — suppress non-urgent escalations
quiet_hours:
start: "23:00"
end: "08:00"
timezone: "Europe/Berlin"

View file

@ -1,49 +0,0 @@
name: Bug Report
about: Something is broken or behaving incorrectly
labels:
- bug
body:
- type: textarea
id: what
attributes:
label: What's broken
description: What happens vs what should happen. Include error messages, logs, or screenshots.
validations:
required: true
- type: textarea
id: reproduce
attributes:
label: Steps to reproduce
description: Minimal steps to trigger the bug.
placeholder: |
1. Run `forge test --match-test testFoo`
2. See error: "revert: ..."
validations:
required: true
- type: textarea
id: affected-files
attributes:
label: Affected files
description: Which source files need to change? Include test files that cover this area.
placeholder: |
- onchain/src/Optimizer.sol
- onchain/test/Optimizer.t.sol
validations:
required: true
- type: textarea
id: acceptance
attributes:
label: Acceptance criteria
description: How do we know it's fixed?
placeholder: |
- [ ] Bug no longer reproduces with steps above
- [ ] Regression test added
- [ ] CI green
validations:
required: true
- type: textarea
id: deps
attributes:
label: Dependencies
description: Issues that must be merged first. Leave empty if none.
placeholder: "- #NNN (reason)"

View file

@ -1,51 +0,0 @@
name: Feature
about: New functionality or enhancement
labels:
- backlog
body:
- type: textarea
id: problem
attributes:
label: Problem / motivation
description: Why is this needed? What's the current limitation?
validations:
required: true
- type: textarea
id: solution
attributes:
label: Proposed solution
description: How should it work? Be specific about behavior, not just "add X."
validations:
required: true
- type: textarea
id: affected-files
attributes:
label: Affected files
description: Which files need to change? Include e2e test files that may break or need updating.
placeholder: |
- tools/push3-evolution/evolve.sh
- tools/push3-evolution/test/evolve.test.ts (new)
validations:
required: true
- type: textarea
id: acceptance
attributes:
label: Acceptance criteria
description: Checkboxes. Max 5 — if you need more, split the issue.
placeholder: |
- [ ] Feature works as described
- [ ] Tests added / updated
- [ ] CI green
validations:
required: true
- type: textarea
id: deps
attributes:
label: Dependencies
description: Issues that must be merged first. Leave empty if none.
placeholder: "- #NNN (reason)"
- type: textarea
id: context
attributes:
label: Additional context
description: Links to docs, prior art, design decisions, related issues.

View file

@ -1,62 +0,0 @@
name: Push3 Seed Variant
about: Write a new optimizer strategy as a Push3 program for the evolution kindergarten
labels:
- backlog
body:
- type: textarea
id: strategy
attributes:
label: Strategy philosophy
description: One paragraph describing the optimizer's approach. What's the core idea?
placeholder: "This optimizer prioritizes floor position depth over everything else. Philosophy: if the floor never moves down, ETH is safe."
validations:
required: true
- type: textarea
id: behavior
attributes:
label: Expected behavior
description: How should each output parameter respond to inputs? Be specific.
placeholder: |
- CI: always 0 (no VWAP bias)
- anchorShare: low (10-20% of ETH)
- anchorWidth: narrow (10-30 ticks)
- discoveryDepth: minimal
- Responds to: percentageStaked (slot 0), averageTaxRate (slot 1)
validations:
required: true
- type: textarea
id: acceptance
attributes:
label: Acceptance criteria
description: Standard for all seed variants.
value: |
- [ ] Push3 file created at `tools/push3-evolution/seeds/llm_<name>.push3`
- [ ] Transpiles without error: `npx tsx tools/push3-transpiler/src/index.ts <file> /tmp/test.sol`
- [ ] Produced Solidity compiles: `forge build`
- [ ] Entry added to `tools/push3-evolution/seeds/manifest.jsonl` with all required fields:
- `file` — filename of the `.push3` seed (e.g. `"llm_my_strategy.push3"`)
- `fitness` — raw integer score from the evaluator, or `null` if not yet evaluated
- `origin` — one of `"hand-written"`, `"llm"`, or `"evolved"`
- `run` — evolution run ID (integer), or `null` for hand-written/LLM seeds
- `generation` — generation index within the run (integer), or `null` for hand-written/LLM seeds
- `date` — ISO date the entry was added (e.g. `"2026-03-14"`)
- `note` — one-sentence description of the strategy and any known caveats
validations:
required: true
- type: textarea
id: reference
attributes:
label: Reference files
description: Key files for understanding Push3 syntax and the transpiler.
value: |
- Transpiler source: `tools/push3-transpiler/src/transpiler.ts` (defines all Push3 opcodes)
- Existing seed: `tools/push3-transpiler/optimizer_v3.push3` (current production optimizer)
- Evolution seed: `tools/push3-transpiler/optimizer_seed.push3` (simpler starting point)
- Push3 uses named bindings via `DYADIC.DEFINE` (e.g. `PERCENTAGESTAKED DYADIC.DEFINE`)
- Outputs: 4 values left on the DYADIC stack (top to bottom): ci, anchorShare, anchorWidth, discoveryDepth
- Inputs: 8 dyadic rational slots pushed onto stack (slot 0=percentageStaked on top, slot 1=averageTaxRate, 2-7=normalized indicators)
- type: textarea
id: deps
attributes:
label: Dependencies
value: "- #667 (seed kindergarten — directory structure and manifest must exist first)"

View file

@ -1,50 +0,0 @@
name: Refactor / Tech Debt
about: Code improvement without changing behavior
labels:
- backlog
body:
- type: textarea
id: what
attributes:
label: What needs cleaning up
description: Current state and why it's a problem.
validations:
required: true
- type: textarea
id: approach
attributes:
label: Approach
description: How to fix it. Specifics matter — the dev-agent will follow this literally.
validations:
required: true
- type: textarea
id: affected-files
attributes:
label: Affected files
description: Every file that will change. Include test files.
validations:
required: true
- type: textarea
id: acceptance
attributes:
label: Acceptance criteria
placeholder: |
- [ ] Refactored code works identically (no behavior change)
- [ ] Existing tests still pass
- [ ] CI green
validations:
required: true
- type: textarea
id: deps
attributes:
label: Dependencies
description: Issues that must be merged first. Leave empty if none.
placeholder: "- #NNN (reason)"
- type: textarea
id: risks
attributes:
label: Risks
description: What could break? Which e2e tests cover this area?
placeholder: |
- e2e/staking.spec.ts — exercises the staking flow that touches these files
- Risk: CSS class rename could break selectors

View file

@ -1,76 +0,0 @@
# Exclude large directories and unnecessary files from Docker build context
# Git
.git/
.github/
# CI
.woodpecker/
# Dependencies (will be installed during build)
node_modules/
**/node_modules/
.pnpm-store/
.npm/
.yarn/
# Build outputs
dist/
build/
out/
.next/
.nuxt/
.cache/
# Development
.vscode/
.idea/
*.swp
*.swo
*~
# Logs
*.log
logs/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Test artifacts
test-results/
playwright-report/
coverage/
# Temporary files
tmp/
temp/
*.tmp
# OS files
.DS_Store
Thumbs.db
# Ponder
.ponder/
services/ponder/.ponder/
# Docker
docker-compose.override.yml
# Environment files
.env
.env.*
!.env.example
# Foundry artifacts (most will be built during bootstrap)
# But keep ABI JSON files needed by kraiken-lib
onchain/out/
!onchain/out/Kraiken.sol/
!onchain/out/Kraiken.sol/Kraiken.json
!onchain/out/Stake.sol/
!onchain/out/Stake.sol/Stake.json
onchain/cache/
onchain/broadcast/
# Artifacts
artifacts/

11
.gitignore vendored
View file

@ -15,7 +15,8 @@ out/
.infura
.DS_Store
**/node_modules/
/onchain/lib/**/node-modules/
onchain/node_modules/
# Ignore vim files:
*~
@ -26,6 +27,7 @@ ponder-repo
tmp
foundry.lock
services/ponder/.env.local
node_modules
# Test artifacts
test-results/
@ -35,10 +37,3 @@ services/ponder/.ponder/
# Temporary files
/tmp/
logs/
# Holdout scenarios (cloned at runtime by evaluate.sh)
.holdout-scenarios/
# Local deployment addresses (generated per-run by bootstrap scripts)
onchain/deployments-local.json

7
.gitmodules vendored
View file

@ -7,13 +7,12 @@
[submodule "onchain/lib/uni-v3-lib"]
path = onchain/lib/uni-v3-lib
url = https://github.com/Aperture-Finance/uni-v3-lib
ignore = dirty
[submodule "onchain/lib/pt-v5-twab-controller"]
path = onchain/lib/pt-v5-twab-controller
url = https://github.com/GenerationSoftware/pt-v5-twab-controller
[submodule "onchain/lib/openzeppelin-contracts"]
path = onchain/lib/openzeppelin-contracts
url = https://github.com/openzeppelin/openzeppelin-contracts
[submodule "onchain/lib/abdk-libraries-solidity"]
path = onchain/lib/abdk-libraries-solidity
url = https://github.com/abdk-consulting/abdk-libraries-solidity
[submodule "onchain/lib/pt-v5-twab-controller"]
path = onchain/lib/pt-v5-twab-controller
url = https://github.com/GenerationSoftware/pt-v5-twab-controller

View file

@ -21,9 +21,8 @@ if [ -f "onchain/src/Kraiken.sol" ] && [ -f "kraiken-lib/src/version.ts" ]; then
echo " Library VERSION: $LIB_VERSION"
echo " Compatible versions: $COMPATIBLE"
# Check if contract version is in compatible list (strip spaces for reliable matching)
COMPATIBLE_CLEAN=$(echo "$COMPATIBLE" | tr -d ' ')
if echo ",$COMPATIBLE_CLEAN," | grep -q ",$CONTRACT_VERSION,"; then
# Check if contract version is in compatible list
if echo ",$COMPATIBLE," | grep -q ",$CONTRACT_VERSION,"; then
echo " ✓ Version sync validated"
else
echo " ❌ Version validation failed!"

View file

@ -1,4 +0,0 @@
{
"tests/**/*.ts": ["eslint"],
"scripts/**/*.ts": ["eslint"]
}

View file

@ -1,144 +0,0 @@
# Build and push CI images for E2E testing services
# Triggered on changes to service code or Dockerfiles
kind: pipeline
type: docker
name: build-ci-images
when:
event: push
branch:
- master
- feature/ci
path:
include:
- .woodpecker/build-ci-images.yml
- docker/Dockerfile.service-ci
- docker/Dockerfile.node-ci
- containers/*-entrypoint.sh
- containers/entrypoint-common.sh
- kraiken-lib/**
- onchain/**
- services/ponder/**
- services/txnBot/**
- web-app/**
- landing/**
- scripts/sync-tax-rates.mjs
- scripts/bootstrap-common.sh
steps:
# Compile Solidity contracts to generate ABI files needed by Dockerfiles
- name: compile-contracts
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -c '
set -euo pipefail
# Initialize git submodules (required for forge dependencies)
git submodule update --init --recursive
# Install uni-v3-lib dependencies (required for Uniswap interfaces)
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
# Build contracts to generate ABI files
cd onchain
export PATH=/root/.foundry/bin:$PATH
forge build
'
- name: build-and-push-images
image: docker:27-cli
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
REGISTRY: registry.niovi.voyage
REGISTRY_USER: ciuser
REGISTRY_PASSWORD:
from_secret: registry_password
commands:
- |
set -eux
# Login to registry
echo "$REGISTRY_PASSWORD" | docker login "$REGISTRY" -u "$REGISTRY_USER" --password-stdin
SHA="${CI_COMMIT_SHA:0:7}"
# Build and push node-ci (base image with Foundry pre-installed)
echo "=== Building node-ci ==="
docker build \
-f docker/Dockerfile.node-ci \
-t "$REGISTRY/harb/node-ci:$SHA" \
-t "$REGISTRY/harb/node-ci:latest" \
.
docker push "$REGISTRY/harb/node-ci:$SHA"
docker push "$REGISTRY/harb/node-ci:latest"
# Build and push ponder-ci (unified Dockerfile)
echo "=== Building ponder-ci ==="
docker build \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=services/ponder \
--build-arg SERVICE_PORT=42069 \
--build-arg ENTRYPOINT_SCRIPT=containers/ponder-entrypoint.sh \
--build-arg HEALTHCHECK_RETRIES=12 \
--build-arg HEALTHCHECK_START=20s \
--build-arg NEEDS_SYMLINKS=false \
-t "$REGISTRY/harb/ponder-ci:$SHA" \
-t "$REGISTRY/harb/ponder-ci:latest" \
.
docker push "$REGISTRY/harb/ponder-ci:$SHA"
docker push "$REGISTRY/harb/ponder-ci:latest"
# Build and push webapp-ci (unified Dockerfile)
echo "=== Building webapp-ci ==="
docker build \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=web-app \
--build-arg SERVICE_PORT=5173 \
--build-arg HEALTHCHECK_PATH=/app/ \
--build-arg HEALTHCHECK_RETRIES=84 \
--build-arg HEALTHCHECK_START=15s \
--build-arg ENTRYPOINT_SCRIPT=containers/webapp-entrypoint.sh \
--build-arg NODE_ENV=development \
--build-arg NEEDS_SYMLINKS=true \
-t "$REGISTRY/harb/webapp-ci:$SHA" \
-t "$REGISTRY/harb/webapp-ci:latest" \
.
docker push "$REGISTRY/harb/webapp-ci:$SHA"
docker push "$REGISTRY/harb/webapp-ci:latest"
# Build and push landing-ci (unified Dockerfile)
echo "=== Building landing-ci ==="
docker build \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=landing \
--build-arg SERVICE_PORT=5174 \
--build-arg ENTRYPOINT_SCRIPT=containers/landing-ci-entrypoint.sh \
--build-arg NODE_ENV=development \
--build-arg HEALTHCHECK_RETRIES=6 \
--build-arg HEALTHCHECK_START=10s \
--build-arg NEEDS_SYMLINKS=false \
-t "$REGISTRY/harb/landing-ci:$SHA" \
-t "$REGISTRY/harb/landing-ci:latest" \
.
docker push "$REGISTRY/harb/landing-ci:$SHA"
docker push "$REGISTRY/harb/landing-ci:latest"
# Build and push txnbot-ci (unified Dockerfile)
echo "=== Building txnbot-ci ==="
docker build \
-f docker/Dockerfile.service-ci \
--build-arg SERVICE_DIR=services/txnBot \
--build-arg SERVICE_PORT=43069 \
--build-arg HEALTHCHECK_PATH=/status \
--build-arg HEALTHCHECK_RETRIES=4 \
--build-arg HEALTHCHECK_START=10s \
--build-arg ENTRYPOINT_SCRIPT=containers/txnbot-entrypoint.sh \
--build-arg NPM_INSTALL_CMD=install \
--build-arg NEEDS_SYMLINKS=false \
-t "$REGISTRY/harb/txnbot-ci:$SHA" \
-t "$REGISTRY/harb/txnbot-ci:latest" \
.
docker push "$REGISTRY/harb/txnbot-ci:$SHA"
docker push "$REGISTRY/harb/txnbot-ci:latest"
echo "=== All CI images built and pushed ==="

View file

@ -1,45 +1,28 @@
kind: pipeline
type: docker
name: build-and-test
name: ci
when:
event: pull_request
path:
exclude:
- "formulas/**"
- "evidence/**"
- "docs/**"
- "*.md"
clone:
git:
image: woodpeckerci/plugin-git
settings:
depth: 50
reference: /git-mirrors/harb.git
netrc_machine: codeberg.org
netrc_username: johba
netrc_password:
from_secret: codeberg_token
trigger:
event:
- push
- pull_request
steps:
- name: bootstrap-deps
depends_on: []
image: registry.niovi.voyage/harb/node-ci:latest
image: registry.sovraigns.network/harb/node-ci:ci-20251013
commands:
- |
bash -c '
bash -lc '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
'
- name: foundry-suite
depends_on: [bootstrap-deps]
image: registry.niovi.voyage/harb/node-ci:latest
image: registry.sovraigns.network/harb/node-ci:ci-20251013
commands:
- |
bash -c '
bash -lc '
set -euo pipefail
cd onchain
export PATH=/root/.foundry/bin:$PATH
@ -49,178 +32,29 @@ steps:
forge snapshot
'
- name: contracts-local-fork
depends_on: [foundry-suite]
image: registry.niovi.voyage/harb/node-ci:latest
environment:
HARB_ENV: BASE_SEPOLIA_LOCAL_FORK
commands:
- |
bash -c '
set -euo pipefail
cd onchain
export PATH=/root/.foundry/bin:$PATH
forge test -vv --ffi
'
# NOTE: contracts-base-sepolia step removed — requires base_sepolia_rpc secret
# which is not configured. Re-add when RPC secret is provisioned.
- name: transpiler-tests
depends_on: []
image: registry.niovi.voyage/harb/node-ci:latest
when:
- event: pull_request
path:
include:
- tools/push3-transpiler/**
- tools/push3-evolution/**
commands:
- |
bash -c '
set -euo pipefail
cd tools/push3-transpiler
npm install --silent
npm run build
npm test
'
- name: evolution-tests
depends_on: []
image: registry.niovi.voyage/harb/node-ci:latest
when:
- event: pull_request
path:
include:
- tools/push3-evolution/**
- tools/push3-transpiler/**
commands:
- |
bash -c '
set -euo pipefail
cd tools/push3-transpiler
npm install --silent
cd ../push3-evolution
npm install --silent
npm run build
npm test
'
- name: seed-transpile-check
depends_on: [bootstrap-deps]
image: registry.niovi.voyage/harb/node-ci:latest
when:
- event: pull_request
path:
include:
- tools/push3-transpiler/**
- tools/push3-evolution/seeds/**
commands:
- |
bash -c '
set -euo pipefail
cd tools/push3-transpiler
npm install --silent
cd ../..
export PATH=/root/.foundry/bin:$PATH
failed=0
for seed in tools/push3-evolution/seeds/*.push3; do
name=$(basename "$seed")
echo "--- Transpiling $name ---"
if ! npx tsx tools/push3-transpiler/src/index.ts "$seed" onchain/src/OptimizerV3Push3.sol; then
echo "WARN: $name failed to transpile (invalid program) — skipping" >&2
continue
fi
echo "--- Compiling $name ---"
if ! (cd onchain && forge build --skip test script --silent); then
echo "FAIL: $name transpiled but Solidity compilation failed" >&2
failed=1
fi
done
git checkout onchain/src/OptimizerV3Push3.sol
if [ "$failed" -ne 0 ]; then
echo "ERROR: One or more seeds failed transpile+compile check" >&2
exit 1
fi
echo "All seeds transpile and compile successfully."
'
- name: single-package-manager
depends_on: []
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -c '
set -euo pipefail
if [ -f kraiken-lib/yarn.lock ]; then
echo "ERROR: kraiken-lib/yarn.lock must not be committed. Use npm only (see packageManager field in kraiken-lib/package.json)." >&2
exit 1
fi
'
- name: validate-evolution-patch
depends_on: []
image: registry.niovi.voyage/harb/node-ci:latest
when:
- event: pull_request
path:
include:
- onchain/**
- tools/push3-evolution/**
commands:
- |
bash -c '
set -euo pipefail
if ! git apply --check tools/push3-evolution/evolution.patch; then
echo "ERROR: evolution.patch needs regeneration — see tools/push3-evolution/evolution.conf" >&2
exit 1
fi
echo "evolution.patch applies cleanly."
'
- name: optimizer-not-mutated
depends_on: []
image: registry.niovi.voyage/harb/node-ci:latest
commands:
- |
bash -c '
set -euo pipefail
if ! git diff --exit-code onchain/src/OptimizerV3.sol; then
echo "ERROR: onchain/src/OptimizerV3.sol has uncommitted mutations (likely left by batch-eval or inject.sh)." >&2
exit 1
fi
echo "OptimizerV3.sol is clean."
'
- name: node-quality
depends_on: [foundry-suite]
image: registry.niovi.voyage/harb/node-ci:latest
image: registry.sovraigns.network/harb/node-ci:ci-20251013
environment:
CI: "true"
NODE_OPTIONS: "--max-old-space-size=2048"
commands:
- |
bash -c '
bash -lc '
set -euo pipefail
npm config set fund false
npm config set audit false
./scripts/build-kraiken-lib.sh
# Root install links workspace packages (@harb/web3) + all workspace members
npm install --no-audit --no-fund
# Landing (workspace member — deps already installed by root)
npm install --prefix landing --no-audit --no-fund
npm run lint --prefix landing
npm run build --prefix landing
# Web-app (workspace member)
npm install --prefix web-app --no-audit --no-fund
npm run lint --prefix web-app
npm run test --prefix web-app -- --run
npm run build --prefix web-app
# Ponder (standalone — not a workspace member)
npm install --prefix services/ponder --no-audit --no-fund
npm run lint --prefix services/ponder
npm run build --prefix services/ponder
# TxnBot (standalone)
npm install --prefix services/txnBot --no-audit --no-fund
npm run lint --prefix services/txnBot
npm run test --prefix services/txnBot
npm run test --prefix services/txnBot -- --runInBand
npm run build --prefix services/txnBot
'

74
.woodpecker/contracts.yml Normal file
View file

@ -0,0 +1,74 @@
kind: pipeline
type: docker
name: contracts-local-fork
trigger:
event:
- push
- pull_request
steps:
- name: bootstrap-deps
image: registry.sovraigns.network/harb/node-ci:ci-20251013
commands:
- |
bash -lc '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
'
- name: forge-suite
image: registry.sovraigns.network/harb/node-ci:ci-20251013
environment:
HARB_ENV: BASE_SEPOLIA_LOCAL_FORK
commands:
- |
bash -lc '
set -euo pipefail
cd onchain
export PATH=/root/.foundry/bin:$PATH
forge build
forge test -vv --ffi
forge snapshot
'
---
kind: pipeline
type: docker
name: contracts-base-sepolia
trigger:
event:
- push
- pull_request
steps:
- name: bootstrap-deps
image: registry.sovraigns.network/harb/node-ci:ci-20251013
commands:
- |
bash -lc '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
'
- name: forge-suite
image: registry.sovraigns.network/harb/node-ci:latest
environment:
HARB_ENV: BASE_SEPOLIA
BASE_SEPOLIA_RPC:
from_secret: base_sepolia_rpc
commands:
- |
bash -lc '
set -euo pipefail
cd onchain
export BASE_SEPOLIA_RPC="$BASE_SEPOLIA_RPC"
export PATH=/root/.foundry/bin:$PATH
forge build
forge test -vv --ffi
forge snapshot
'

View file

@ -1,478 +1,60 @@
# E2E Testing Pipeline using Native Woodpecker Services
# No Docker-in-Docker - uses pre-built images for fast startup
kind: pipeline
type: docker
name: e2e
when:
event: pull_request
path:
exclude:
- "tools/**"
- "onchain/test/FitnessEvaluator*"
- "docs/**"
- "formulas/**"
- "evidence/**"
- ".codeberg/**"
- "*.md"
labels:
podman: "true"
clone:
git:
image: woodpeckerci/plugin-git
settings:
depth: 50
reference: /git-mirrors/harb.git
netrc_machine: codeberg.org
netrc_username: johba
netrc_password:
from_secret: codeberg_token
# All background services - services get proper DNS resolution in Woodpecker
# Note: Services can't depend on steps, so they wait internally for contracts.env
services:
# PostgreSQL for Ponder
- name: postgres
image: postgres:16-alpine
environment:
POSTGRES_USER: ponder
POSTGRES_PASSWORD: ponder_local
POSTGRES_DB: ponder_local
# Anvil blockchain fork
- name: anvil
image: ghcr.io/foundry-rs/foundry:latest
entrypoint:
- anvil
- --host=0.0.0.0
- --port=8545
- --fork-url=https://sepolia.base.org
- --fork-block-number=20000000
- --chain-id=31337
- --accounts=10
- --balance=10000
# Ponder indexer - waits for contracts.env from bootstrap
- name: ponder
image: registry.niovi.voyage/harb/ponder-ci:latest
commands:
- |
set -eu
# Wait for contracts.env (bootstrap writes it after deploying)
echo "=== Waiting for contracts.env ==="
for i in $(seq 1 120); do
if [ -f /woodpecker/src/contracts.env ]; then
echo "Found contracts.env after $i attempts"
break
fi
echo "Waiting for contracts.env... ($i/120)"
sleep 3
done
if [ ! -f /woodpecker/src/contracts.env ]; then
echo "ERROR: contracts.env not found after 6 minutes"
exit 1
fi
# Source contract addresses from bootstrap
. /woodpecker/src/contracts.env
echo "=== Contract addresses ==="
echo "KRAIKEN=$KRAIKEN"
echo "STAKE=$STAKE"
echo "START_BLOCK=$START_BLOCK"
# Export env vars required by ponder
export DATABASE_URL="$DATABASE_URL"
export DATABASE_SCHEMA="ponder_ci_$START_BLOCK"
export START_BLOCK="$START_BLOCK"
export KRAIKEN_ADDRESS="$KRAIKEN"
export STAKE_ADDRESS="$STAKE"
export LM_ADDRESS="${LIQUIDITY_MANAGER:-0x0000000000000000000000000000000000000000}"
export PONDER_NETWORK=BASE_SEPOLIA_LOCAL_FORK
export PONDER_RPC_URL_BASE_SEPOLIA_LOCAL_FORK="$PONDER_RPC_URL_1"
export PONDER_RPC_URL_1="$PONDER_RPC_URL_1"
# Overlay kraiken-lib and ponder source from workspace
# CI_WORKSPACE points to the repo checkout directory
WS="${CI_WORKSPACE:-$(pwd)}"
echo "=== Workspace: $WS ==="
echo "=== Overlaying kraiken-lib from workspace ==="
if [ -d "$WS/kraiken-lib/dist" ]; then
cp -r "$WS/kraiken-lib/dist/." /app/kraiken-lib/dist/
cp -r "$WS/kraiken-lib/src/." /app/kraiken-lib/src/
echo "kraiken-lib updated from workspace (src + dist)"
elif [ -d "$WS/kraiken-lib/src" ]; then
cp -r "$WS/kraiken-lib/src/." /app/kraiken-lib/src/
echo "kraiken-lib/src updated (dist not available — may need rebuild)"
else
echo "WARNING: kraiken-lib not found at $WS/kraiken-lib"
fi
echo "=== Overlaying ponder source from workspace ==="
# Copy individual source files (not the directory itself) to avoid nested src/src/
if [ -d "$WS/services/ponder/src" ]; then
cp -r "$WS/services/ponder/src/." /app/services/ponder/src/
echo "ponder/src files updated from workspace"
fi
for f in ponder.schema.ts ponder.config.ts; do
if [ -f "$WS/services/ponder/$f" ]; then
cp "$WS/services/ponder/$f" /app/services/ponder/"$f"
echo "ponder/$f updated from workspace"
fi
done
echo "=== Starting Ponder (pre-built image + workspace overlay) ==="
cd /app/services/ponder
{
echo "DATABASE_URL=${DATABASE_URL}"
echo "PONDER_RPC_URL_1=${PONDER_RPC_URL_1}"
echo "DATABASE_SCHEMA=${DATABASE_SCHEMA}"
echo "START_BLOCK=${START_BLOCK}"
} > .env.local
# Use 'start' mode in CI — 'dev' mode watches for file changes and causes
# a hot-restart loop when workspace overlay modifies source files
exec npm run start
# Webapp - waits for contracts.env from bootstrap
- name: webapp
image: registry.niovi.voyage/harb/webapp-ci:latest
environment:
CI: "true"
commands:
- |
set -eu
# Wait for contracts.env (bootstrap writes it after deploying)
echo "=== Waiting for contracts.env ==="
for i in $(seq 1 120); do
if [ -f /woodpecker/src/contracts.env ]; then
echo "Found contracts.env after $i attempts"
break
fi
echo "Waiting for contracts.env... ($i/120)"
sleep 3
done
if [ ! -f /woodpecker/src/contracts.env ]; then
echo "ERROR: contracts.env not found after 6 minutes"
exit 1
fi
# Source contract addresses from bootstrap
. /woodpecker/src/contracts.env
# Export environment variables for Vite
export VITE_KRAIKEN_ADDRESS="$KRAIKEN"
export VITE_STAKE_ADDRESS="$STAKE"
export VITE_DEFAULT_CHAIN_ID=31337
export VITE_LOCAL_RPC_PROXY_TARGET=http://anvil:8545
export VITE_LOCAL_GRAPHQL_PROXY_TARGET=http://ponder:42069
# Default is the Sepolia SwapRouter; override via VITE_SWAP_ROUTER env var for other networks.
export VITE_SWAP_ROUTER="${VITE_SWAP_ROUTER:-0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4}"
export VITE_ENABLE_LOCAL_SWAP=true
export VITE_BASE_PATH=/app/
# Overlay kraiken-lib from workspace (may be newer than baked-in image)
WS="${CI_WORKSPACE:-$(pwd)}"
echo "=== Overlaying kraiken-lib from workspace ==="
if [ -d "$WS/kraiken-lib/dist" ]; then
cp -r "$WS/kraiken-lib/dist/." /app/kraiken-lib/dist/
cp -r "$WS/kraiken-lib/src/." /app/kraiken-lib/src/
echo "kraiken-lib updated from workspace (src + dist)"
elif [ -d /app/kraiken-lib/src ]; then
echo "kraiken-lib/src found in image (using baked-in version)"
else
echo "ERROR: kraiken-lib/src not found!"
exit 1
fi
# Overlay webapp source from workspace (ensures CI tests current branch)
echo "=== Overlaying webapp source from workspace ==="
if [ -d "$WS/web-app/src" ]; then
cp -r "$WS/web-app/src/." /app/web-app/src/
echo "webapp/src updated from workspace"
fi
for f in vite.config.ts vite.config.js; do
if [ -f "$WS/web-app/$f" ]; then
cp "$WS/web-app/$f" /app/web-app/"$f"
echo "webapp/$f updated from workspace"
fi
done
# Overlay @harb/web3 shared package from workspace
if [ -d "$WS/packages/web3" ]; then
mkdir -p /app/packages/web3
cp -r "$WS/packages/web3/." /app/packages/web3/
# Link @harb/web3 into web-app node_modules
mkdir -p /app/web-app/node_modules/@harb
ln -sf /app/packages/web3 /app/web-app/node_modules/@harb/web3
# Symlink wagmi/viem into packages dir so @harb/web3 can resolve them
mkdir -p /app/packages/web3/node_modules
ln -sf /app/web-app/node_modules/@wagmi /app/packages/web3/node_modules/@wagmi
ln -sf /app/web-app/node_modules/viem /app/packages/web3/node_modules/viem
echo "@harb/web3 linked with wagmi/viem deps"
fi
# Overlay @harb/utils shared package from workspace
if [ -d "$WS/packages/utils" ]; then
mkdir -p /app/packages/utils
cp -r "$WS/packages/utils/." /app/packages/utils/
# Link @harb/utils into web-app node_modules
mkdir -p /app/web-app/node_modules/@harb
ln -sf /app/packages/utils /app/web-app/node_modules/@harb/utils
# Symlink viem into packages dir so @harb/utils can resolve it
mkdir -p /app/packages/utils/node_modules
ln -sf /app/web-app/node_modules/viem /app/packages/utils/node_modules/viem
echo "@harb/utils linked with viem dep"
fi
# Overlay @harb/analytics shared package from workspace
if [ -d "$WS/packages/analytics" ]; then
mkdir -p /app/packages/analytics
cp -r "$WS/packages/analytics/." /app/packages/analytics/
mkdir -p /app/web-app/node_modules/@harb
ln -sf /app/packages/analytics /app/web-app/node_modules/@harb/analytics
echo "@harb/analytics linked for webapp"
fi
echo "=== Starting webapp (pre-built image + source overlay) ==="
cd /app/web-app
# Explicitly set CI=true to disable Vue DevTools in vite.config.ts
# (prevents 500 errors from devtools path resolution in CI environment)
export CI=true
echo "CI=$CI (should be 'true' to disable Vue DevTools)"
exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/
# Landing page - no contracts needed, starts immediately
- name: landing
image: registry.niovi.voyage/harb/landing-ci:latest
commands:
- |
set -eu
# Overlay landing source from workspace
WS="${CI_WORKSPACE:-$(pwd)}"
if [ -d "$WS/landing/src" ]; then
cp -r "$WS/landing/src/." /app/landing/src/
echo "landing/src updated from workspace"
fi
for f in vite.config.ts vite.config.js; do
if [ -f "$WS/landing/$f" ]; then
cp "$WS/landing/$f" /app/landing/"$f"
echo "landing/$f updated from workspace"
fi
done
# Overlay @harb/web3 shared package
if [ -d "$WS/packages/web3" ]; then
mkdir -p /app/packages/web3
cp -r "$WS/packages/web3/." /app/packages/web3/
# Landing CI image doesn't have wagmi — install it
cd /app/landing
npm install --no-audit --no-fund @wagmi/vue viem 2>/dev/null || true
# Link @harb/web3
mkdir -p /app/landing/node_modules/@harb
ln -sf /app/packages/web3 /app/landing/node_modules/@harb/web3
# Symlink wagmi/viem into packages dir for resolution
mkdir -p /app/packages/web3/node_modules
ln -sf /app/landing/node_modules/@wagmi /app/packages/web3/node_modules/@wagmi 2>/dev/null || true
ln -sf /app/landing/node_modules/viem /app/packages/web3/node_modules/viem 2>/dev/null || true
echo "@harb/web3 linked for landing"
fi
# Overlay @harb/ui-shared shared package from workspace
if [ -d "$WS/packages/ui-shared" ]; then
mkdir -p /app/packages/ui-shared
cp -r "$WS/packages/ui-shared/." /app/packages/ui-shared/
# Link @harb/ui-shared into landing node_modules
mkdir -p /app/landing/node_modules/@harb
ln -sf /app/packages/ui-shared /app/landing/node_modules/@harb/ui-shared
# Symlink vue into packages dir so @harb/ui-shared can resolve it
mkdir -p /app/packages/ui-shared/node_modules
ln -sf /app/landing/node_modules/vue /app/packages/ui-shared/node_modules/vue 2>/dev/null || true
echo "@harb/ui-shared linked for landing"
fi
# Overlay @harb/analytics shared package from workspace
if [ -d "$WS/packages/analytics" ]; then
mkdir -p /app/packages/analytics
cp -r "$WS/packages/analytics/." /app/packages/analytics/
mkdir -p /app/landing/node_modules/@harb
ln -sf /app/packages/analytics /app/landing/node_modules/@harb/analytics
echo "@harb/analytics linked for landing"
fi
echo "=== Starting landing (pre-built image + source overlay) ==="
cd /app/landing
exec npm run dev -- --host 0.0.0.0 --port 5174
# Caddy proxy - waits for contracts.env to ensure other services are starting
- name: caddy
image: caddy:2.8-alpine
commands:
- |
# Wait briefly for other services to start
echo "=== Waiting for contracts.env before starting Caddy ==="
for i in $(seq 1 120); do
if [ -f /woodpecker/src/contracts.env ]; then
echo "Found contracts.env, starting Caddy..."
break
fi
echo "Waiting for contracts.env... ($i/120)"
sleep 3
done
printf '%s\n' ':8081 {' \
' route /app* {' \
' reverse_proxy webapp:5173' \
' }' \
' route /api/graphql* {' \
' uri strip_prefix /api' \
' reverse_proxy ponder:42069' \
' }' \
' route /api/rpc* {' \
' uri strip_prefix /api/rpc' \
' reverse_proxy anvil:8545' \
' }' \
' reverse_proxy landing:5174' \
'}' > /etc/caddy/Caddyfile
exec caddy run --config /etc/caddy/Caddyfile
trigger:
event:
- push
- pull_request
steps:
# Step 0: Install dependencies for onchain compilation
- name: install-deps
image: node:20-alpine
commands:
- |
set -eu
apk add --no-cache git
echo "=== Installing uni-v3-lib dependencies ==="
git submodule update --init --recursive
cd onchain/lib/uni-v3-lib
npm install
# Step 1: Wait for base services and deploy contracts
# Uses pre-built node-ci image with Foundry pre-installed (saves ~60s)
- name: bootstrap
image: registry.niovi.voyage/harb/node-ci:latest
depends_on:
- install-deps
commands:
- |
# Create a bootstrap wrapper that runs under bash
# (Woodpecker uses /bin/sh which lacks 'source' and bash-isms)
export ANVIL_RPC=http://anvil:8545
export CONTRACT_ENV=/woodpecker/src/contracts.env
export LOG_FILE=/dev/null
export ONCHAIN_DIR="$PWD/onchain"
export TXNBOT_FUND_VALUE=10ether
export TXNBOT_ADDRESS=0x70997970C51812dc3A010C7d01b50e0d17dc79C8
export TXNBOT_PRIVATE_KEY=0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d
exec bash scripts/ci-bootstrap.sh
# Step 2: Wait for stack to be healthy (services run in background)
- name: wait-for-stack
image: alpine:3.20
depends_on:
- bootstrap
commands:
- |
set -eu
apk add --no-cache curl bash
echo "=== Waiting for DNS resolution (Docker embedded DNS can be slow under load) ==="
for svc in ponder webapp landing caddy; do
for attempt in $(seq 1 60); do
if getent hosts "$svc" >/dev/null 2>&1; then
echo "[dns] $svc resolved after $attempt attempts"
break
fi
echo "[dns] ($attempt/60) waiting for $svc DNS..."
sleep 5
done
done
echo "=== Waiting for stack to be healthy (max 7 min) ==="
bash scripts/wait-for-service.sh http://ponder:42069/health 420 ponder
# Wait for ponder to finish historical indexing (not just respond)
# /ready returns 200 only when fully synced, 503 while indexing
echo "=== Waiting for Ponder indexing to complete ==="
for i in $(seq 1 120); do
HTTP_CODE=$(curl -sf -o /dev/null -w '%{http_code}' --max-time 3 http://ponder:42069/ready 2>/dev/null || echo "000")
if [ "$HTTP_CODE" = "200" ]; then
echo "[wait] Ponder fully indexed after $((i * 3))s"
break
fi
if [ "$i" = "120" ]; then
echo "[wait] WARNING: Ponder not fully indexed after 360s, continuing anyway"
fi
echo "[wait] ($i/120) Ponder indexing... (HTTP $HTTP_CODE)"
sleep 3
done
bash scripts/wait-for-service.sh http://webapp:5173/app/ 420 webapp
bash scripts/wait-for-service.sh http://landing:5174/ 420 landing
bash scripts/wait-for-service.sh http://caddy:8081/app/ 420 caddy
echo "=== Stack is healthy ==="
# Step 3: Run E2E tests — cross-browser matrix
# Chromium runs all specs (01-07), then Firefox/WebKit/mobile run read-only specs (03,06,07).
# The matrix is defined in playwright.config.ts via `projects`.
- name: run-e2e-tests
image: mcr.microsoft.com/playwright:v1.55.1-jammy
depends_on:
- wait-for-stack
timeout: 1800
- name: run-e2e
image: registry.sovraigns.network/harb/playwright-ci:ci-20251013
privileged: true
environment:
STACK_BASE_URL: http://caddy:8081
STACK_RPC_URL: http://caddy:8081/api/rpc
STACK_WEBAPP_URL: http://caddy:8081
STACK_GRAPHQL_URL: http://caddy:8081/api/graphql
CI: "true"
PNPM_HOME: /root/.local/share/pnpm
PATH: /root/.local/share/pnpm:/root/.local/bin:/usr/local/bin:/usr/bin:/bin
HARB_ENV: BASE_SEPOLIA_LOCAL_FORK
SKIP_WATCH: "1"
XDG_RUNTIME_DIR: /tmp/podman-run
commands:
- |
set -eux
echo "=== Checking system resources ==="
free -h || true
cat /proc/meminfo | grep -E 'MemTotal|MemAvail' || true
echo "=== Verifying Playwright browsers ==="
npx playwright install --dry-run 2>&1 || true
ls -la /ms-playwright/ 2>/dev/null || echo "No /ms-playwright directory"
echo "=== Installing test dependencies ==="
set -eu
set -o pipefail 2>/dev/null || true
mkdir -p "$XDG_RUNTIME_DIR"
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
npm config set fund false
npm config set audit false
npm ci --no-audit --no-fund
npm install --prefix kraiken-lib --no-audit --no-fund
./scripts/build-kraiken-lib.sh
npm install --prefix landing --no-audit --no-fund
npm install --prefix web-app --no-audit --no-fund
npm install --prefix services/ponder --no-audit --no-fund
npm install --prefix services/txnBot --no-audit --no-fund
npm install --no-audit --no-fund
npx playwright install chromium
trap "./scripts/dev.sh stop || true" EXIT
./scripts/dev.sh start
timeout 240 bash -c 'until curl -sf http://localhost:8081/api/graphql > /dev/null; do sleep 3; done'
npm run test:e2e
echo "=== Running E2E tests — cross-browser matrix (workers=1 to limit memory) ==="
npx playwright test --reporter=list --workers=1
# Step 4: Collect artifacts
- name: collect-artifacts
image: alpine:3.20
depends_on:
- run-e2e-tests
- run-e2e
when:
status:
- success
- failure
commands:
- |
set -eu
set -euo pipefail
apk add --no-cache tar gzip
mkdir -p artifacts
if [ -d playwright-report ]; then
tar -czf artifacts/playwright-report.tgz playwright-report
echo "Playwright report archived"
fi
if [ -d test-results ]; then
tar -czf artifacts/test-results.tgz test-results
echo "Test results archived"
fi
ls -lh artifacts/ 2>/dev/null || echo "No artifacts"
if [ -d playwright-report ]; then tar -czf artifacts/playwright-report.tgz playwright-report; fi
if [ -d test-results ]; then tar -czf artifacts/test-results.tgz test-results; fi
if [ -d logs ]; then tar -czf artifacts/stack-logs.tgz logs; fi

View file

@ -2,25 +2,26 @@ kind: pipeline
type: docker
name: fuzz-nightly
when:
event: cron
trigger:
event:
- cron
steps:
- name: bootstrap-deps
image: registry.niovi.voyage/harb/node-ci:latest
image: registry.sovraigns.network/harb/node-ci:ci-20251013
commands:
- |
bash -c '
bash -lc '
set -euo pipefail
git submodule update --init --recursive
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
'
- name: fuzz
image: registry.niovi.voyage/harb/node-ci:latest
image: registry.sovraigns.network/harb/node-ci:ci-20251013
commands:
- |
bash -c '
bash -lc '
set -euo pipefail
if ! command -v bc >/dev/null 2>&1; then
apt-get update

View file

@ -1,12 +0,0 @@
kind: pipeline
type: docker
name: passthrough
when:
- event: pull_request
steps:
- name: pass
image: alpine
commands:
- echo "ok"

View file

@ -2,23 +2,24 @@ kind: pipeline
type: docker
name: release
labels:
podman: "true"
when:
event: tag
steps:
- name: version-check
image: registry.niovi.voyage/harb/node-ci:latest
image: registry.sovraigns.network/harb/node-ci:ci-20251013
when:
event: tag
commands:
- |
bash -c '
bash -lc '
set -euo pipefail
git submodule update --init --recursive
corepack enable
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
export PATH=/root/.foundry/bin:$PATH
forge build >/dev/null
corepack enable
yarn --cwd onchain/lib/uni-v3-lib install --frozen-lockfile
npm config set fund false
npm config set audit false
npm install --prefix kraiken-lib --no-audit --no-fund
@ -68,14 +69,14 @@ steps:
'
- name: build-artifacts
image: registry.niovi.voyage/harb/node-ci:latest
image: registry.sovraigns.network/harb/node-ci:ci-20251013
depends_on:
- version-check
when:
event: tag
commands:
- |
bash -c '
bash -lc '
set -euo pipefail
npm config set fund false
npm config set audit false
@ -106,8 +107,8 @@ steps:
tar -czf release-bundle.tgz -C release dist
'
- name: docker-publish
image: registry.niovi.voyage/harb/playwright-ci:latest
- name: podman-publish
image: registry.sovraigns.network/harb/playwright-ci:ci-20251013
pull: true
privileged: true
depends_on:
@ -125,7 +126,7 @@ steps:
from_secret: registry_password
commands:
- |
bash -c '
bash -lc '
set -eo pipefail
if [ -z "${CI_COMMIT_TAG:-}" ]; then
echo "CI_COMMIT_TAG not set" >&2
@ -140,38 +141,38 @@ steps:
if [ -z "${COMPOSE_PROJECT_NAME:-}" ]; then
COMPOSE_PROJECT_NAME=harb
fi
REGISTRY_ROOT="${REGISTRY_SERVER:-registry.niovi.voyage}"
REGISTRY_ROOT="${REGISTRY_SERVER:-registry.sovraigns.network}"
REGISTRY_NS="${REGISTRY_NAMESPACE:-harb}"
REGISTRY_BASE="$REGISTRY_ROOT/$REGISTRY_NS"
docker login "$REGISTRY_ROOT" -u "$REGISTRY_USERNAME" -p "$REGISTRY_PASSWORD"
podman login "$REGISTRY_ROOT" -u "$REGISTRY_USERNAME" -p "$REGISTRY_PASSWORD"
# Build and publish CI base images
node_ci_tmp=harb-node-ci-build
playwright_ci_tmp=harb-playwright-ci-build
docker build -f docker/Dockerfile.node-ci -t "$node_ci_tmp" .
docker tag "$node_ci_tmp" "$REGISTRY_BASE/node-ci:$TAG"
docker push "$REGISTRY_BASE/node-ci:$TAG"
docker tag "$REGISTRY_BASE/node-ci:$TAG" "$REGISTRY_BASE/node-ci:latest"
docker push "$REGISTRY_BASE/node-ci:latest"
podman build -f docker/Dockerfile.node-ci -t "$node_ci_tmp" .
podman tag "$node_ci_tmp" "$REGISTRY_BASE/node-ci:$TAG"
podman push "$REGISTRY_BASE/node-ci:$TAG"
podman tag "$REGISTRY_BASE/node-ci:$TAG" "$REGISTRY_BASE/node-ci:latest"
podman push "$REGISTRY_BASE/node-ci:latest"
docker build -f docker/Dockerfile.playwright-ci -t "$playwright_ci_tmp" .
docker tag "$playwright_ci_tmp" "$REGISTRY_BASE/playwright-ci:$TAG"
docker push "$REGISTRY_BASE/playwright-ci:$TAG"
docker tag "$REGISTRY_BASE/playwright-ci:$TAG" "$REGISTRY_BASE/playwright-ci:latest"
docker push "$REGISTRY_BASE/playwright-ci:latest"
podman build -f docker/Dockerfile.playwright-ci -t "$playwright_ci_tmp" .
podman tag "$playwright_ci_tmp" "$REGISTRY_BASE/playwright-ci:$TAG"
podman push "$REGISTRY_BASE/playwright-ci:$TAG"
podman tag "$REGISTRY_BASE/playwright-ci:$TAG" "$REGISTRY_BASE/playwright-ci:latest"
podman push "$REGISTRY_BASE/playwright-ci:latest"
docker-compose build ponder webapp landing txn-bot
podman-compose build ponder webapp landing txn-bot
for service in ponder webapp landing txn-bot; do
image=$(docker image ls --filter "label=com.docker.compose.project=$COMPOSE_PROJECT_NAME" --filter "label=com.docker.compose.service=$service" --format "{{.Repository}}:{{ .Tag }}" | head -n1)
image=$(podman image ls --filter "label=com.docker.compose.project=$COMPOSE_PROJECT_NAME" --filter "label=com.docker.compose.service=$service" --format "{{.Repository}}:{{ .Tag }}" | head -n1)
if [ -z "$image" ]; then
echo "Unable to find built image for $service" >&2
exit 1
fi
target="$REGISTRY_BASE/$service"
docker tag "$image" "$target:$TAG"
docker push "$target:$TAG"
docker tag "$target:$TAG" "$target:latest"
docker push "$target:latest"
podman tag "$image" "$target:$TAG"
podman push "$target:$TAG"
podman tag "$target:$TAG" "$target:latest"
podman push "$target:latest"
done
'

135
AGENTS.md
View file

@ -1,94 +1,73 @@
<!-- last-reviewed: baa501fa46355f7b04bffdf386d397ad19f69298 -->
# Agent Brief: Harb Stack
## What is KRAIKEN?
KRAIKEN couples Harberger-tax staking with a dominant Uniswap V3 liquidity manager to create asymmetric slippage, sentiment-driven pricing, and VWAP "price memory" safeguards. Liquidity dominance is mission-critical; treat any regression that weakens the LiquidityManager's control as a priority incident.
## Core Concepts
- KRAIKEN couples Harberger-tax staking with a dominant Uniswap V3 liquidity manager to create asymmetric slippage, sentiment-driven pricing, and VWAP "price memory" safeguards.
- Liquidity dominance is mission-critical; treat any regression that weakens the LiquidityManager's control as a priority incident.
- Harberger staking supplies the sentiment oracle that drives Optimizer parameters, which in turn tune liquidity placement and supply expansion.
## User Journey
1. **Buy** Acquire KRAIKEN on Uniswap.
2. **Stake** Declare a tax rate on kraiken.org to earn from protocol growth.
3. **Compete** Snatch undervalued positions to optimise returns.
1. **Buy** - Acquire KRAIKEN on Uniswap.
2. **Stake** - Declare a tax rate on kraiken.org to earn from protocol growth.
3. **Compete** - Snatch undervalued positions to optimise returns.
## Directory Map
| Path | What | Guide |
|------|------|-------|
| `onchain/` | Solidity + Foundry contracts, deploy scripts, fuzzing | [onchain/AGENTS.md](onchain/AGENTS.md) |
| `services/ponder/` | Ponder indexer powering the GraphQL API | [services/ponder/AGENTS.md](services/ponder/AGENTS.md) |
| `landing/` | Vue 3 marketing + staking interface | [landing/AGENTS.md](landing/AGENTS.md) |
| `web-app/` | Staking UI | [web-app/AGENTS.md](web-app/AGENTS.md) |
| `kraiken-lib/` | Shared TypeScript helpers for clients and bots | [kraiken-lib/AGENTS.md](kraiken-lib/AGENTS.md) |
| `services/txnBot/` | Automation bot for `recenter()` and `payTax()` upkeep | [services/txnBot/AGENTS.md](services/txnBot/AGENTS.md) |
| `formulas/` | TOML pipeline definitions (sense/act) for the evaluator | [formulas/AGENTS.md](formulas/AGENTS.md) |
| `scripts/` | `dev.sh`, bootstrap, build helpers; `harb-evaluator/` red-team agent | [scripts/harb-evaluator/AGENTS.md](scripts/harb-evaluator/AGENTS.md) |
| `packages/analytics/` | `@harb/analytics` — self-hosted Umami wrapper for funnel tracking | — |
| `tests/e2e/` | Playwright end-to-end tests — desktop + mobile viewports (iPhone 14, Pixel 7), Chromium + Firefox cross-browser matrix; includes conversion funnel spec (`07-conversion-funnel.spec.ts`) | — |
| `docs/` | Architecture, product truth, environment, ops guides | — |
## Operating the Stack
- Start everything with `nohup ./scripts/dev.sh start &` and stop via `./scripts/dev.sh stop`. Do not launch services individually.
- **Restart modes** for faster iteration:
- `./scripts/dev.sh restart --light` - Fast restart (~10-20s): only webapp + txnbot, preserves Anvil/Ponder state. Use for frontend changes.
- `./scripts/dev.sh restart --full` - Full restart (~3-4min): redeploys contracts, fresh state. Use for contract changes.
- Supported environments: `BASE_SEPOLIA_LOCAL_FORK` (default Anvil fork), `BASE_SEPOLIA`, and `BASE`. Match contract addresses and RPCs accordingly.
- The stack boots Anvil, deploys contracts, seeds liquidity, starts Ponder, launches the landing site, and runs the txnBot. Wait for logs to settle before manual testing.
## Quick Start
```bash
./scripts/dev.sh start # boots full stack (~3-6 min first time)
./scripts/dev.sh health # verify all services healthy
./scripts/dev.sh stop # stop and clean up
```
See [docs/dev-environment.md](docs/dev-environment.md) for restart modes, ports, Docker topology, and common pitfalls.
## Component Guides
- `onchain/` - Solidity + Foundry contracts, deploy scripts, and fuzzing helpers ([details](onchain/AGENTS.md)).
- `services/ponder/` - Ponder indexer powering the GraphQL API ([details](services/ponder/AGENTS.md)).
- `landing/` - Vue 3 marketing + staking interface ([details](landing/AGENTS.md)).
- `kraiken-lib/` - Shared TypeScript helpers for clients and bots ([details](kraiken-lib/AGENTS.md)).
- `services/txnBot/` - Automation bot for `recenter()` and `payTax()` upkeep ([details](services/txnBot/AGENTS.md)).
## Docker / LXD Notes
- Containers require `security_opt: apparmor=unconfined` when running inside LXD to avoid permission denied errors on Unix socket creation (Anvil, Postgres).
- Umami analytics runs on **port 3001** (moved from 3000 to avoid conflict with Forgejo when running alongside the disinto factory stack).
## Testing & Tooling
- Contracts: run `forge build`, `forge test`, and `forge snapshot` inside `onchain/`.
- Fuzzing: scripts under `onchain/analysis/` (e.g., `./analysis/run-fuzzing.sh [optimizer] debugCSV`) generate replayable scenarios.
- Integration: after the stack boots, inspect Anvil logs, hit `http://localhost:8081/api/graphql` for Ponder, and poll `http://localhost:8081/api/txn/status` for txnBot health.
- **E2E Tests**: Playwright-based full-stack tests in `tests/e2e/` verify complete user journeys (mint ETH → swap KRK → stake). Run with `npm run test:e2e` from repo root. Tests use mocked wallet provider with Anvil accounts and automatically start/stop the stack. See `INTEGRATION_TEST_STATUS.md` and `SWAP_VERIFICATION.md` for details.
## Red-team Agent Context
The red-team agent (`scripts/harb-evaluator/red-team.sh`) injects the following Solidity sources into the agent prompt so it can reason from exact contract logic:
- `LiquidityManager.sol` — three-position manager, recenter, floor formula
- `ThreePositionStrategy.sol` — position lifecycle abstractions
- `Optimizer.sol` / `OptimizerV3.sol` — current candidate under test
- `VWAPTracker.sol` / `PriceOracle.sol` — price oracle and VWAP mechanics
- `Kraiken.sol``outstandingSupply()`, KRK mint/burn, transfer mechanics
- `Stake.sol``snatch()`, withdrawal, KRK exclusion from floor denominator
## Version Validation System
- **Contract VERSION**: `Kraiken.sol` exposes a `VERSION` constant (currently v1) that must be incremented for breaking changes to TAX_RATES, events, or core data structures.
- **Ponder Validation**: On startup, Ponder reads the contract VERSION and validates against `COMPATIBLE_CONTRACT_VERSIONS` in `kraiken-lib/src/version.ts`. Fails hard (exit 1) on mismatch to prevent indexing wrong data.
- **Frontend Check**: Web-app validates `KRAIKEN_LIB_VERSION` at runtime (currently placeholder; future: query Ponder GraphQL for full 3-way validation).
- **CI Enforcement**: GitHub workflow validates that contract VERSION is in `COMPATIBLE_CONTRACT_VERSIONS` before merging PRs.
- See `VERSION_VALIDATION.md` for complete architecture, workflows, and troubleshooting.
## Key Patterns
- **ES Modules everywhere**: The entire stack uses `"type": "module"` and `import` syntax.
- **`token0isWeth`**: Flips amount semantics; confirm ordering before seeding or interpreting liquidity.
- **Price^2 (X96)**: VWAP, `ethScarcity`, and Optimizer outputs operate on price^2. Avoid "normalising" to sqrt inadvertently.
- **LiquidityManager funding**: Fund with Base WETH (`0x4200...0006`) before expecting `recenter()` to succeed.
- **Ponder state**: Stored in `.ponder/`; drop the directory if schema changes break migrations.
- **Harberger staking** supplies the sentiment oracle that drives Optimizer parameters, which in turn tune liquidity placement and supply expansion.
- **viem v2 slot0**: `slot0()` returns an array, not a record. `tick` is at index 1 (e.g. `slot0Response[1]`), not `slot0Response.tick`.
## Podman Orchestration
- **Dependency Management**: `podman-compose.yml` has NO `depends_on` declarations. All service ordering is handled in `scripts/dev.sh` via phased startup with explicit health checks.
- **Why**: Podman's dependency graph validator fails when containers have compose metadata dependencies, causing "container not found in input list" errors even when containers exist.
- **Startup Phases**: (1) Create all containers, (2) Start anvil+postgres and wait for healthy, (3) Start bootstrap and wait for completion, (4) Start ponder and wait for healthy, (5) Start webapp/landing/txn-bot, (6) Start caddy.
- If you see dependency graph errors, verify `depends_on` was not re-added to `podman-compose.yml`.
## Engineering Principles
These apply to infrastructure (Docker, scripts, startup/teardown) and test/scenario execution — NOT to frontend polling of HTTP APIs where caching is the correct solution.
1. **Never use fixed delays or `waitForTimeout`** — react to actual events instead. Use `eth_subscribe` (WebSocket) for on-chain push notifications, `eth_newFilter` + `eth_getFilterChanges` for on-chain polling, DOM mutation observers or Playwright's `waitForSelector`/`waitForURL` for UI changes, callback patterns for async flows. Even if event-driven code takes more effort, it is always the right answer.
2. **Never use hardcoded expectations** — dynamic systems change. React to actual state, not assumed state. Don't assert a specific block number, token amount, or address unless it's a protocol constant.
3. **Event subscription > polling with timeout > fixed delay** — prefer true push subscriptions (`eth_subscribe`, WebSocket, observers). When push is unavailable (e.g. HTTP-only RPC), polling with a timeout and clear error is acceptable. A fixed `sleep`/`wait`/`waitForTimeout` is never acceptable. Existing violations should be replaced when touched.
**Note:** Frontend components polling HTTP APIs (e.g. LiveStats polling Ponder GraphQL) are fine — the scalability solution there is caching at the proxy layer, not subscriptions.
## Before Opening a PR
1. `forge build && forge test` in `onchain/` — contracts must compile and pass.
2. Run `npm run test:e2e` from repo root if you touched frontend or services.
3. `git diff --check` — no trailing whitespace or merge markers.
4. Keep commits clean; never leave commented-out code or untested changes.
5. If you changed `kraiken-lib`, rebuild: `./scripts/build-kraiken-lib.sh`.
6. If you changed contract VERSION or events, update `COMPATIBLE_CONTRACT_VERSIONS` in `kraiken-lib/src/version.ts`.
## Guardrails & Tips
- `token0isWeth` flips amount semantics; confirm ordering before seeding or interpreting liquidity.
- VWAP, `ethScarcity`, and Optimizer outputs operate on price^2 (X96). Avoid "normalising" to sqrt inadvertently.
- Fund the LiquidityManager with Base WETH (`0x4200...0006`) before expecting `recenter()` to succeed.
- Ponder stores data in `.ponder/`; drop the directory if schema changes break migrations.
- Keep git clean before committing; never leave commented-out code or untested changes.
- **ES Modules**: The entire stack uses ES modules. kraiken-lib, txnBot, Ponder, and web-app all require `"type": "module"` in package.json and use `import` syntax.
- **kraiken-lib Build**: Run `./scripts/build-kraiken-lib.sh` before `podman-compose up` so containers mount a fresh `kraiken-lib/dist` from the host.
- **Live Reload**: `scripts/watch-kraiken-lib.sh` rebuilds on file changes (requires inotify-tools) and restarts dependent containers automatically.
## Code Quality & Git Hooks
Pre-commit hooks (Husky + lint-staged) run ESLint + Prettier on staged files. Each component has its own `.lintstagedrc.json`. To test manually: `git add <files> && .husky/pre-commit`.
- **Pre-commit Hooks**: Husky runs lint-staged on all staged files before commits. Each component (onchain, kraiken-lib, ponder, txnBot, web-app, landing) has `.lintstagedrc.json` configured for ESLint + Prettier.
- **Version Validation (Future)**: Pre-commit hook includes validation logic that will enforce version sync between `onchain/src/Kraiken.sol` (contract VERSION constant) and `kraiken-lib/src/version.ts` (COMPATIBLE_CONTRACT_VERSIONS array). This validation only runs if both files exist and contain version information.
- **Husky Setup**: `.husky/pre-commit` orchestrates all pre-commit checks. Modify this file to add new validation steps.
- To test hooks manually: `git add <files> && .husky/pre-commit`
## Deeper Docs
| Topic | File |
|-------|------|
| Dev environment, Docker, ports, pitfalls | [docs/dev-environment.md](docs/dev-environment.md) |
| Woodpecker CI setup and debugging | [docs/ci-pipeline.md](docs/ci-pipeline.md) |
| Testing: Foundry, E2E, version validation | [docs/testing.md](docs/testing.md) |
| Codeberg API access and webhooks | [docs/codeberg-api.md](docs/codeberg-api.md) |
| Product truth and positioning | [docs/PRODUCT-TRUTH.md](docs/PRODUCT-TRUTH.md) |
| Architecture overview | [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) |
| UX decisions | [docs/UX-DECISIONS.md](docs/UX-DECISIONS.md) |
| Environment configuration | [docs/ENVIRONMENT.md](docs/ENVIRONMENT.md) |
| Version validation architecture | [VERSION_VALIDATION.md](VERSION_VALIDATION.md) |
| Uniswap V3 math deep dive | [onchain/UNISWAP_V3_MATH.md](onchain/UNISWAP_V3_MATH.md) |
| Technical appendix | [TECHNICAL_APPENDIX.md](TECHNICAL_APPENDIX.md) |
| Harberger tax mechanics | [HARBERG.md](HARBERG.md) |
## Handy Commands
- `foundryup` - update Foundry toolchain.
- `anvil --fork-url https://sepolia.base.org` - manual fork when diagnosing outside the helper script.
- `cast call <POOL> "slot0()"` - inspect pool state.
- `PONDER_NETWORK=BASE_SEPOLIA_LOCAL_FORK npm run dev` (inside `services/ponder/`) - focused indexer debugging when the full stack is already running.
- `curl -X POST http://localhost:8081/api/graphql -d '{"query":"{ stats(id:\"0x01\"){kraikenTotalSupply}}"}'`
- `curl http://localhost:8081/api/txn/status`
## References
- Deployment history: `onchain/deployments-local.json`, `onchain/broadcast/`.
- Deep dives: `TECHNICAL_APPENDIX.md`, `HARBERG.md`, and `onchain/UNISWAP_V3_MATH.md`.

View file

@ -0,0 +1,264 @@
# Changelog: Version Validation System & Tax Rate Index Refactoring
## Date: 2025-10-07
## Summary
This release implements a comprehensive version validation system to ensure contract-indexer-frontend compatibility and completes the tax rate index refactoring to eliminate fragile decimal lookups.
## Major Features
### 1. Version Validation System
**Contract Changes:**
- `onchain/src/Kraiken.sol`: Added `VERSION = 1` constant (line 28)
- Public constant for runtime validation
- Must be incremented for breaking changes to TAX_RATES, events, or data structures
**kraiken-lib:**
- `kraiken-lib/src/version.ts` (NEW): Central version tracking
- `KRAIKEN_LIB_VERSION = 1`
- `COMPATIBLE_CONTRACT_VERSIONS = [1]`
- `isCompatibleVersion()` validation function
- `getVersionMismatchError()` for detailed error reporting
- `kraiken-lib/package.json`: Added `./version` export
**Ponder Indexer:**
- `services/ponder/src/helpers/version.ts` (NEW): Contract version validation
- Reads `VERSION` from deployed contract at startup
- Validates against `COMPATIBLE_CONTRACT_VERSIONS`
- **Fails hard (exit 1)** on mismatch to prevent indexing wrong data
- `services/ponder/src/kraiken.ts`: Integrated version check on first Transfer event
- `services/ponder/ponder-env.d.ts`: Fixed permissions (chmod 666)
**Frontend:**
- `web-app/src/composables/useVersionCheck.ts` (NEW): Version validation composable
- Validates `KRAIKEN_LIB_VERSION` loads correctly
- Placeholder for future GraphQL-based 3-way validation
- Warns (doesn't fail) on mismatch
**CI/CD:**
- `.github/workflows/validate-version.yml` (NEW): Automated version validation
- Validates contract VERSION is in COMPATIBLE_CONTRACT_VERSIONS
- Runs on PRs and pushes to master/main
- Prevents merging incompatible versions
**Documentation:**
- `VERSION_VALIDATION.md` (NEW): Complete architecture and workflows
- System architecture diagram
- Version bump workflow
- Troubleshooting guide
- Maintenance guidelines
### 2. Podman Orchestration Fix
**Problem:** Podman's dependency graph validator fails with "container not found in input list" errors when containers have `depends_on` metadata.
**Solution:**
- `podman-compose.yml`: Removed ALL `depends_on` declarations from:
- bootstrap
- ponder
- webapp
- landing
- txn-bot
- caddy
- `scripts/dev.sh`: Implemented phased startup with explicit health checks:
1. Create all containers (`podman-compose up --no-start`)
2. Start anvil & postgres, wait for healthy
3. Start bootstrap, wait for completion
4. Start ponder, wait for healthy
5. Start webapp/landing/txn-bot
6. Start caddy
**Result:** Stack starts reliably without dependency graph errors.
### 3. Tax Rate Index Refactoring (Completion)
**Web App:**
- `web-app/src/composables/useSnatchSelection.ts`:
- Replaced `position.taxRate >= maxTaxRateDecimal` with `posIndex >= selectedTaxRateIndex`
- Fixed test data to match index-based logic
- `web-app/src/composables/usePositions.ts`:
- Replaced decimal-based sorting with index-based sorting
- Changed threshold calculation from average percentage to average index
- `web-app/src/components/collapse/CollapseActive.vue`:
- Changed low tax detection from decimal to index comparison
- `web-app/src/views/GraphView.vue`: **DELETED** (dead code, 63 lines)
**Ponder:**
- `services/ponder/ponder.schema.ts`:
- **CRITICAL FIX**: Import `TAX_RATE_OPTIONS` from kraiken-lib instead of hardcoded array
- Added `taxRateIndex` column to positions table
- Added index on `taxRateIndex` column
- `services/ponder/src/stake.ts`:
- Extract and store `taxRateIndex` from contract events
**Tests:**
- `kraiken-lib/src/tests/taxRates.test.ts`: Fixed Jest ES module compatibility
- `kraiken-lib/jest.config.js``kraiken-lib/jest.config.cjs`: Renamed for CommonJS
- `web-app/src/composables/__tests__/useSnatchSelection.spec.ts`: Fixed test data inconsistencies
## File Changes
### Added Files (7)
1. `.github/workflows/validate-version.yml` - CI/CD validation
2. `VERSION_VALIDATION.md` - Documentation
3. `kraiken-lib/src/version.ts` - Version tracking
4. `kraiken-lib/jest.config.cjs` - Jest config
5. `services/ponder/src/helpers/version.ts` - Ponder validation
6. `web-app/src/composables/useVersionCheck.ts` - Frontend validation
7. `scripts/sync-tax-rates.mjs` - Tax rate sync script
### Deleted Files (2)
1. `web-app/src/views/GraphView.vue` - Dead code
2. `kraiken-lib/jest.config.js` - Replaced with .cjs
### Modified Files (29)
1. `.gitignore` - Added test artifacts, logs, ponder state
2. `CLAUDE.md` - Added Version Validation and Podman Orchestration sections
3. `kraiken-lib/AGENTS.md` - Added version.ts to Key Modules
4. `kraiken-lib/package.json` - Added ./version export
5. `kraiken-lib/src/index.ts` - Export version validation functions
6. `kraiken-lib/src/taxRates.ts` - Generated tax rates with checksums
7. `kraiken-lib/src/tests/taxRates.test.ts` - Fixed Jest compatibility
8. `onchain/src/Kraiken.sol` - Added VERSION constant
9. `podman-compose.yml` - Removed all depends_on declarations
10. `scripts/build-kraiken-lib.sh` - Updated build process
11. `scripts/dev.sh` - Implemented phased startup
12. `services/ponder/AGENTS.md` - Updated documentation
13. `services/ponder/ponder-env.d.ts` - Fixed permissions
14. `services/ponder/ponder.schema.ts` - Import from kraiken-lib, add taxRateIndex
15. `services/ponder/src/kraiken.ts` - Added version validation
16. `services/ponder/src/stake.ts` - Store taxRateIndex
17. `tests/e2e/01-acquire-and-stake.spec.ts` - Test updates
18. `web-app/README.md` - Documentation updates
19. `web-app/env.d.ts` - Type updates
20. `web-app/src/components/StakeHolder.vue` - Index-based logic
21. `web-app/src/components/collapse/CollapseActive.vue` - Index comparison
22. `web-app/src/components/fcomponents/FSelect.vue` - Index handling
23. `web-app/src/composables/__tests__/useSnatchSelection.spec.ts` - Fixed tests
24. `web-app/src/composables/useAdjustTaxRates.ts` - Index-based adjustments
25. `web-app/src/composables/usePositions.ts` - Index-based sorting and threshold
26. `web-app/src/composables/useSnatchSelection.ts` - Index-based filtering
27. `web-app/src/composables/useStake.ts` - Index handling
28-29. Various documentation and configuration updates
## Breaking Changes
### For Contract Deployments
- **New VERSION constant must be present** in Kraiken.sol
- Ponder will fail to start if VERSION is missing or incompatible
### For Ponder
- **Schema migration required**: Add `taxRateIndex` column to positions table
- **Database reset recommended**: Delete `.ponder/` directory before starting
- **New import required**: Import TAX_RATE_OPTIONS from kraiken-lib
### For kraiken-lib Consumers
- **New export**: `kraiken-lib/version` must be built
- Run `./scripts/build-kraiken-lib.sh` to regenerate dist/
## Migration Guide
### Updating to This Version
1. **Stop the stack:**
```bash
./scripts/dev.sh stop
```
2. **Clean Ponder state:**
```bash
rm -rf services/ponder/.ponder/
```
3. **Rebuild kraiken-lib:**
```bash
./scripts/build-kraiken-lib.sh
```
4. **Rebuild contracts (if needed):**
```bash
cd onchain && forge build
```
5. **Start the stack:**
```bash
./scripts/dev.sh start
```
6. **Verify version validation:**
```bash
podman logs harb_ponder_1 | grep "version validated"
```
Should output: `✓ Contract version validated: v1 (kraiken-lib v1)`
### Future Version Bumps
When making breaking changes to TAX_RATES, events, or data structures:
1. **Increment VERSION in Kraiken.sol:**
```solidity
uint256 public constant VERSION = 2;
```
2. **Update COMPATIBLE_CONTRACT_VERSIONS in kraiken-lib/src/version.ts:**
```typescript
export const KRAIKEN_LIB_VERSION = 2;
export const COMPATIBLE_CONTRACT_VERSIONS = [2]; // Or [1, 2] for backward compat
```
3. **Rebuild and redeploy:**
```bash
./scripts/build-kraiken-lib.sh
rm -rf services/ponder/.ponder/
cd onchain && forge script script/Deploy.s.sol
```
## Validation
### Unit Tests
- ✅ kraiken-lib tests pass
- ✅ web-app tests pass
- ✅ Ponder codegen succeeds
- ✅ onchain tests pass
### Integration Tests
- ✅ Stack starts without dependency errors
- ✅ Ponder validates contract version successfully
- ✅ Ponder indexes events with taxRateIndex
- ✅ GraphQL endpoint responds
- ✅ Version validation logs appear in Ponder output
### Manual Verification
```bash
# Check Ponder logs for version validation
podman logs harb_ponder_1 | grep "version validated"
# Output: ✓ Contract version validated: v1 (kraiken-lib v1)
# Check contract VERSION
cast call $KRAIKEN_ADDRESS "VERSION()" --rpc-url http://localhost:8545
# Output: 1
# Query positions with taxRateIndex
curl -X POST http://localhost:42069/graphql \
-d '{"query":"{ positions { id taxRateIndex taxRate } }"}'
```
## Known Issues
None. All blocking issues resolved.
## Contributors
- Claude Code (Anthropic)
## References
- Full architecture: `VERSION_VALIDATION.md`
- Podman orchestration: `CLAUDE.md` § Podman Orchestration
- Tax rate system: `kraiken-lib/src/taxRates.ts`

1
CLAUDE.md Symbolic link
View file

@ -0,0 +1 @@
AGENTS.md

View file

@ -1,16 +1,27 @@
# Harberger (Stage 1)
# Harberg
The foundation layer of the KRAIKEN protocol. A staking market balanced by the Harberger tax.
## product
A staking market balanced by the Harberger Tax.
## Status: Complete
## token
$HRB is created when users buy more tokens and sell less from the uniswap pool (mainly from the liquidity position owned by the Harberg protocol)
Stage 1 established the core mechanisms now used by Stage 2 (KRAIKEN):
## staking
users can stake tokens - up to 20% of the total supply. When supply increases (more people buy then sell) stakers will keep the total supply they staked. So 1% of staked total supply remains 1%.
- **Token**: KRAIKEN (KRK) — minted on buys from the LiquidityManager's Uniswap V3 positions, burned on sells
- **Staking**: Users stake tokens and declare a self-assessed tax rate. Stakers maintain percentage ownership of total supply as it grows.
- **Snatching**: Any position can be taken by someone willing to pay a higher tax rate, creating a competitive prediction market for token value
- **Tax collection**: Automated by the transaction bot (`services/txnBot/`)
## Evolution
## landing
in the landing folder in this repository you find the front-end implementation.
Stage 1's static liquidity strategy evolved into Stage 2's three-position dynamic strategy with OptimizerV3. The Harberger staking mechanism now serves as the sentiment oracle driving optimizer parameter selection. See [TECHNICAL_APPENDIX.md](TECHNICAL_APPENDIX.md) for details.
## contracts
in the onchain folder are the smart contracts implementing the token and the economy
## services
1 bot collecting taxes on old stakes and liquidate stakers if tax is not paid
1 bot calling recenter on the liquidity provider contract
## subgraph
- data backend for front-end for landing project
## hosting
- crypto friendly

View file

@ -1,233 +0,0 @@
# Ponder LM Indexing - Backend Metrics Implementation
**Branch:** feat/ponder-lm-indexing
**Commit:** 3ec9bfb
**Date:** 2026-02-16
## Summary
Successfully implemented backend indexing for three key protocol metrics:
1. **ETH Reserve Growth (7d)**
2. **Floor Price per KRK**
3. **Trading Fees (7d)** ⚠️ Infrastructure ready, awaiting implementation
## Changes Made
### 1. Schema Updates (`ponder.schema.ts`)
#### Extended `stats` Table
Added fields to track new metrics:
```typescript
// 7-day ETH reserve growth metrics
ethReserve7dAgo: bigint (nullable)
ethReserveGrowthBps: int (nullable) // basis points
// 7-day trading fees earned
feesEarned7dEth: bigint (default 0n)
feesEarned7dKrk: bigint (default 0n)
feesLastUpdated: bigint (nullable)
// Floor price metrics
floorTick: int (nullable)
floorPriceWei: bigint (nullable) // wei per KRK
currentPriceWei: bigint (nullable)
floorDistanceBps: int (nullable) // distance from floor in bps
```
#### New Tables
- **`ethReserveHistory`**: Tracks ETH balance over time for 7-day growth calculations
- `id` (string): block_logIndex format
- `timestamp` (bigint): event timestamp
- `ethBalance` (bigint): ETH reserve at that time
- **`feeHistory`**: Infrastructure for fee tracking (ready for Collect events)
- `id` (string): block_logIndex format
- `timestamp` (bigint): event timestamp
- `ethFees` (bigint): ETH fees collected
- `krkFees` (bigint): KRK fees collected
### 2. Handler Updates (`src/lm.ts`)
#### New Helper Functions
- **`priceFromTick(tick: number): bigint`**
- Calculates price in wei per KRK from Uniswap V3 tick
- Uses formula: `price = 1.0001^tick`
- Accounts for WETH as token0 in the pool
- Returns wei-denominated price for precision
- **`calculateBps(newValue: bigint, oldValue: bigint): number`**
- Calculates basis points difference: `(new - old) / old * 10000`
- Used for growth percentages and distance metrics
#### Updated Event Handlers
**`EthScarcity` and `EthAbundance` Handlers:**
1. **Record ETH Reserve History**
- Insert ethBalance into `ethReserveHistory` table
- Enables time-series analysis
2. **Calculate 7-Day Growth**
- Look back 7 days in `ethReserveHistory`
- Find oldest record within window
- Calculate growth in basis points
- Updates: `ethReserve7dAgo`, `ethReserveGrowthBps`
3. **Calculate Floor Price**
- Uses `vwapTick` from event as floor tick
- Converts to wei per KRK using `priceFromTick()`
- Updates: `floorTick`, `floorPriceWei`
4. **Calculate Current Price**
- Uses `currentTick` from event
- Converts to wei per KRK
- Updates: `currentPriceWei`
5. **Calculate Floor Distance**
- Computes distance from floor in basis points
- Formula: `(currentPrice - floorPrice) / floorPrice * 10000`
- Updates: `floorDistanceBps`
**`Recentered` Handler:**
- Cleaned up: removed direct ETH balance reading
- Now relies on EthScarcity/EthAbundance events for balance data
- Maintains counter updates for recenter tracking
## Fee Tracking Status
### Current State: Infrastructure Ready ⚠️
The fee tracking infrastructure is in place but **not yet populated** with data:
- `feeHistory` table exists in schema
- `feesEarned7dEth` and `feesEarned7dKrk` fields default to `0n`
- `feesLastUpdated` field available
### Implementation Options
Documented two approaches in code:
#### Option 1: Uniswap V3 Pool Collect Events (Recommended)
**Pros:**
- Accurate fee data directly from pool
- Clean separation of concerns
**Cons:**
- Requires adding UniswapV3Pool contract to `ponder.config.ts`
- **Forces a full re-sync from startBlock** (significant downtime)
**Implementation Steps:**
1. Add pool contract to `ponder.config.ts`:
```typescript
UniswapV3Pool: {
abi: UniswapV3PoolAbi,
chain: NETWORK,
address: '<pool-address>',
startBlock: selectedNetwork.contracts.startBlock,
}
```
2. Add handler for `Collect(address owner, int24 tickLower, int24 tickUpper, uint128 amount0, uint128 amount1)`
3. Filter for LM contract as owner
4. Record to `feeHistory` table
5. Calculate 7-day rolling totals
#### Option 2: Derive from ETH Balance Changes
**Pros:**
- No config changes needed
- No resync required
**Cons:**
- Less accurate (hard to isolate fees from other balance changes)
- More complex logic
- Potential edge cases
### Recommendation
**Wait for next planned resync** or **maintenance window** to implement Option 1 (Collect events). This provides the most accurate and maintainable solution.
## Verification
All success criteria met:
**Schema compiles** (valid TypeScript)
```bash
npm run build
# ✓ Wrote ponder-env.d.ts
```
**New fields added to stats**
- ethReserve7dAgo, ethReserveGrowthBps
- feesEarned7dEth, feesEarned7dKrk, feesLastUpdated
- floorTick, floorPriceWei, currentPriceWei, floorDistanceBps
**EthScarcity/EthAbundance handlers updated**
- Record history to `ethReserveHistory`
- Calculate 7-day growth
- Calculate floor and current prices
- Calculate floor distance
**Fee tracking infrastructure**
- `feeHistory` table created
- Fee fields in stats table
- Documentation for implementation approaches
**Git commit with --no-verify**
```bash
git log -1 --oneline
# 3ec9bfb feat(ponder): add ETH reserve growth, floor price, and fee tracking metrics
```
**Linting passes**
```bash
npm run lint
# (no errors)
```
## Testing Recommendations
1. **Deploy to staging** and verify:
- `ethReserveHistory` table populates on Scarcity/Abundance events
- 7-day growth calculates correctly after 7 days of data
- Floor price calculations match expected values
- Current price tracks tick movements
2. **API Integration:**
- Query `stats` table for dashboard metrics
- Use `ethReserveHistory` for time-series charts
- Monitor for null values in first 7 days (expected)
3. **Future Fee Implementation:**
- Plan maintenance window for resync
- Test Collect event handler on local fork first
- Verify fee calculations match pool data
## Technical Notes
### Price Calculation Details
- **Formula:** `price = 1.0001^tick`
- **Token Order:** WETH (0x4200...0006) < KRK (0xff196f...) WETH is token0
- **Conversion:** Price in KRK/WETH → invert to get wei per KRK
- **Precision:** Uses `BigInt` for wei-level accuracy, floating point only for tick math
### 7-Day Lookback Strategy
- **Simple approach:** Query `ethReserveHistory` for oldest record ≥ 7 days ago
- **Performance:** Acceptable given low event volume (~50-200 recenters/week)
- **Edge case:** Returns `null` if less than 7 days of history exists
### Data Consistency
- Both EthScarcity and EthAbundance handlers implement identical logic
- Ensures consistent metrics regardless of recenter direction
- History records use `block_logIndex` format for unique IDs
## Files Modified
- `/home/debian/harb/services/ponder/ponder.schema.ts` (+50 lines)
- `/home/debian/harb/services/ponder/src/lm.ts` (+139 lines, -32 lines)
**Total:** +157 lines, comprehensive implementation with documentation.
---
**Status:** ✅ Ready for staging deployment
**Next Steps:** Monitor metrics in staging, plan fee implementation during next maintenance window

124
README.md
View file

@ -1,113 +1,39 @@
# KRAIKEN
$HRB is a gig to become successful in DeFi. It is a protocol that implements the fairest ponzi in the world.
The fairest ponzi in the world.
This repository structures our approach and manages our collaboration to achieve this goal.
KRAIKEN is a DeFi protocol that couples Harberger-tax staking with a dominant Uniswap V3 liquidity manager. The result: asymmetric slippage, sentiment-driven pricing, and VWAP-based price memory that protects the protocol from exploitation.
Deployed on [Base](https://base.org).
## Project Milestones
## The Three Stages
The fairest ponzi in the world will be launched in 3 stages, each representing a more advanced version of the previous one.
1. **Harberger** — A staking market balanced by the Harberger tax. *Complete.*
2. **KRAIKEN** — Token issuance governed by an automated liquidity manager. *Current stage.*
3. **SoverAIgns** — The liquidity manager augmented by AI for outlandish performance. *Future.*
1. [Harberg](HARBERG.md) - a staking market and an speculative laverage platform.
2. KrAIken - Harberg, but token issuance is governed by an automated liquidity manager.
3. SoverAIgns - KrAIKen, but the liquidity manager is augmented by AI and deliveres outlandish performance
## How It Works
### Three-Position Liquidity Strategy
## Project Values and Organization
- the core value and mantra of the project is: **ship, ship,** :ship:
- delivery is valued highest and goes over quality or communication
- if you see work, do it. most likely every-one but you will lose interest in the project, and you will deliver it by yourself. work this way, take responsibility for everything. document everything methodically in this repository, use .md files, commits, issues(feature request, support issue), and pull requests. if other people still follow this repository collaboration will emerge, and duplication of work will be avoided automatically.
- **no structured communication outside of this repository** is relevant for the success, nor will it be rewarded.
The LiquidityManager maintains three Uniswap V3 positions simultaneously:
### open questions
- multisig? keyholders?
- payout, shares?
- **Anchor** — Shallow liquidity near the current price. Fast price discovery, high slippage for attackers.
- **Discovery** — Medium liquidity bordering the anchor. The fee capture zone.
- **Floor** — Deep liquidity at VWAP-adjusted distance. Price memory that protects against whale dumps.
## Revenue Sources
- the tax paid by the stakers will be forwarded to the multisig
- the liquidity manager contract will collect all liquidity fees and forward them to the multisig
- at launch of each stage of the project the keyholders will invest a share of the [multisig]() holdings and coordinate to sell at a favorable time. all profits from all sales are the multisigs profits.
Any round-trip trade (buy → recenter → sell) pays disproportionate slippage costs twice, making manipulation unprofitable.
## Timeline
it would be great if we can launch stage 1 or even 2 for DevCon.
### Harberger Tax Sentiment Oracle
Stakers self-assess tax rates on their positions. Higher tax = higher confidence. Positions can be snatched by anyone willing to pay more. This creates a continuous prediction market for token sentiment.
## Kick-off Call Harberg
Agenda
### OptimizerV3
Reads staking data (% staked, average tax rate) and outputs a binary bear/bull configuration:
- **Bear** (~94% of state space): AS=30%, AW=100, CI=0, DD=0.3e18 — protective
- **Bull** (>91% staked, low tax): AS=100%, AW=20, CI=0, DD=1e18 — aggressive fee capture
The binary step avoids the AW 40-80 kill zone where intermediate parameters are exploitable.
### VWAP Floor Defense
The floor position uses volume-weighted average price with directional recording (buys only). During sell pressure, the VWAP-to-price distance grows, making the floor resist walkdown. This gives the protocol "eternal memory" against dormant whale attacks.
## Tech Stack
| Component | Technology | Location |
|-----------|-----------|----------|
| Smart Contracts | Solidity, Foundry | `onchain/` |
| Indexer | Ponder (TypeScript) | `services/ponder/` |
| Staking App | Vue 3, Vite, Wagmi | `web-app/` |
| Landing Page | Vue 3, Vite | `landing/` |
| Automation Bot | Node.js, Express | `services/txnBot/` |
| Shared Library | TypeScript | `kraiken-lib/` |
| Block Explorer | Otterscan | Docker service |
| Reverse Proxy | Caddy | Docker service |
## Repository Structure
```
harb/
├── onchain/ # Solidity contracts, tests, deployment scripts, analysis
│ ├── src/ # Core: Kraiken, Stake, LiquidityManager, OptimizerV3
│ ├── test/ # Foundry test suite
│ ├── script/ # Deployment scripts
│ └── analysis/ # Fuzzing, parameter sweeps, security review
├── services/
│ ├── ponder/ # Blockchain indexer → GraphQL API
│ └── txnBot/ # recenter() + payTax() automation
├── web-app/ # Staking dashboard (Vue 3)
├── landing/ # Marketing site (Vue 3)
├── kraiken-lib/ # Shared TypeScript helpers and ABIs
├── tests/e2e/ # Playwright end-to-end tests
├── scripts/ # Dev environment, CI bootstrap, utilities
├── docker/ # CI Dockerfiles
├── containers/ # Entrypoints, Caddyfile
└── docs/ # Deployment runbook, Docker guide
```
## Quick Start
```bash
# Prerequisites: Docker Engine (Linux) or Colima (Mac)
# See docs/docker.md for installation
nohup ./scripts/dev.sh start & # Start full stack (~3-6 min first time)
tail -f nohup.out # Watch progress
./scripts/dev.sh health # Verify all services healthy
```
Access points (via Caddy on port 8081):
- Landing: http://localhost:8081/
- Staking app: http://localhost:8081/app/
- GraphQL: http://localhost:8081/api/graphql
## Contracts (Base Mainnet)
| Contract | Address |
|----------|---------|
| Kraiken | `0x45caa5929f6ee038039984205bdecf968b954820` |
| Stake | `0xed70707fab05d973ad41eae8d17e2bcd36192cfc` |
| LiquidityManager | `0x7fd4e645ce258dd3942eddbeb2f99137da8ba13b` |
## Documentation
- [AGENTS.md](AGENTS.md) — Development guide and operational reference
- [TECHNICAL_APPENDIX.md](TECHNICAL_APPENDIX.md) — Deep technical analysis of protocol mechanics
- [docs/DEPLOYMENT_RUNBOOK.md](docs/DEPLOYMENT_RUNBOOK.md) — Production deployment guide
- [onchain/UNISWAP_V3_MATH.md](onchain/UNISWAP_V3_MATH.md) — Uniswap V3 math reference
- [onchain/analysis/SECURITY_REVIEW.md](onchain/analysis/SECURITY_REVIEW.md) — Security analysis and fuzzing results
## License
GPL-3.0-or-later
- [design doc](https://hackmd.io/JvxEI0fnR_uZsIrrBm95Qw)
- [Liquidity Provisioning in KRAIKEN](https://hackmd.io/yNiN3TyETT2A1uwQVGYiSA)

View file

@ -1,42 +0,0 @@
# RESOURCES.md — Project Capability Inventory
## evolution
- type: compute
- capability: run harb agents (dev, review, action, gardener, planner, predictor, supervisor), run formulas (red-team, evolution, holdout, user-test)
- agents: dev, review, action, gardener, supervisor, planner, predictor
- ram: 8GB
- note: dedicated to harb — all agent and formula workloads run here
- dispatch: file an issue with the `action` label. The action-poll picks it up and runs the referenced formula. See `formulas/*.toml` in this repo for available formulas.
- constraint: only one formula can run at a time (port 8545 shared by red-team, evolution, holdout, user-test). Dev agents run concurrently with formulas.
## codeberg-johba
- type: source-control
- capability: host repo, issue tracker, PR workflow, API access
- repo: johba/harb
- note: owner account
## codeberg-disinto-bot
- type: source-control
- capability: review PRs, merge PRs, push branches
- repo: johba/harb
- note: bot account, push+pull permissions, no admin
## woodpecker-ci
- type: ci
- capability: run pipelines on PR and push events, docker backend
- note: hosted on harb-staging, triggers via Codeberg webhook
## base-mainnet-rpc
- type: infrastructure
- capability: Base L2 mainnet access for on-chain queries, event logs, pool data
- env: INFURA_API_KEY
- note: used by evidence formulas (red-team, evolution)
## Available formulas (this repo)
- run-red-team.toml — adversarial attack suite against the optimizer
- run-evolution.toml — Push3 evolution pipeline, mutate and select optimizer candidates
- run-holdout.toml — holdout evaluation of evolved candidates
- run-user-test.toml — simulated user interaction testing
- run-resources.toml — collect disk/RAM/API usage metrics
- run-protocol.toml — query on-chain protocol health (TVL, fees, positions)
- dispatch: create an issue with `action` label, body references the formula name

View file

@ -1,55 +0,0 @@
# STATE.md — What harb currently is and does
- [2026-03-13] Evolution pipeline works end-to-end: Push3 → transpile → compile → revm fitness evaluation → selection (#665)
- [2026-03-13] Diverse seed generation for evolution population (#656)
- [2026-03-13] Crossover operator for Push3 programs (#657)
- [2026-03-13] Elitism preserves top N candidates unchanged across generations (#643)
- [2026-03-13] Gas limit as evolutionary fitness pressure (#645)
- [2026-03-13] Default bear outputs for crashed/broken Push3 programs (#651)
- [2026-03-13] Normalized inputs for Push3 optimizer (0..1e18 indicators) (#649)
- [2026-03-13] Bootstrap VWAP with seed trade during deployment (#633)
- [2026-03-13] e2e tests skip for tools-only and docs-only PRs (#641)
- [2026-03-13] Issue templates for bug, feature, push3-seed, refactor (#678)
- [2026-03-13] revm fitness evaluator with UUPS bypass and graceful attack ops (#629)
- [2026-03-12] Dark factory: dev-agent, review-agent, supervisor with cron */10 staggered
- [2026-03-12] CI: single build-and-test pipeline + e2e with path filtering
- [2026-03-12] Ponder indexing: transfers, mints, burns, staking, protocol stats
- [2026-03-12] Landing page with LiveStats, WalletCard, contract addresses
- [2026-03-12] Staking app with position dashboard and P&L tracking
- [2026-03-12] OptimizerV3 with Push3 transpiler output injection
- [2026-03-12] Three-position strategy: Floor, Anchor, Discovery
- [2026-03-12] VWAPTracker for price oracle
- [2026-03-12] Harberger tax staking mechanism
- [2026-03-13] LLM seed — Momentum Follower optimizer (#695)
- [2026-03-14] evolve.sh auto-incrementing per-run results directory (#752)
- [2026-03-14] EVAL_MODE now defaults to revm (#751)
- [2026-03-14] LLM seed — Defensive Floor Hugger optimizer (#672)
- [2026-03-14] evolve.sh stale tmpdirs break subsequent runs (#750)
- [2026-03-14] evolve.sh silences all batch-eval errors with 2>/dev/null (#749)
- [2026-03-14] evolution-daemon.sh — perpetual evolution loop on DO box (#748)
- [2026-03-14] No mainnet VWAP bootstrap runbook (#728)
- [2026-03-14] fitness.sh individual-scoring path still silences errors (#766)
- [2026-03-14] bootstrap.sh anvil_setCode guard now targets correct feeDest 0xf6a3... (#760)
- [2026-03-14] llm_contrarian.push3 AW=150/250 clamped to 100 — three rounds unaddressed (#756)
- [2026-03-14] bootstrap.sh hardcodes BASE_SEPOLIA_LOCAL_FORK even on mainnet forks (#746)
- [2026-03-14] remove MAX_ANCHOR_WIDTH clamp in ThreePositionStrategy (#783)
- [2026-03-15] re-add MAX_ANCHOR_WIDTH=1233 guard at LiquidityManager call site; anchorWidth clamped before _setPositions, independent of Optimizer (#817)
- [2026-03-14] increase CALCULATE_PARAMS_GAS_LIMIT from 200k to 500k (#782)
- [2026-03-15] add evolution run 8 champion to seed pool (#781)
- [2026-03-15] fix FitnessEvaluator.t.sol broken on Base mainnet fork (#780)
- [2026-03-15] No generic flag dispatch: only `token_value_inflation` is ever zero-rated (#723)
- [2026-03-15] `llm`-origin entries in manifest have null fitness and no evaluation path (#724): evaluate-seeds.sh scores null-fitness seeds and writes results back to manifest.jsonl
- [2026-03-15] manifest.jsonl schema has no canonical machine-readable definition (#720)
- [2026-03-15] CID format change silently drops historical generation JSONL on re-admission (#757): warn on unrecognised CID format instead of silently skipping
- [2026-03-15] evolve.sh does not write `note` field — schema drift between hand-written and evolved entries (#719): auto-generate note "Evolved from <seed> (run<N> gen<G>)" for every admitted entry
- [2026-03-15] No-op varCounter assignment before false branch in processExecIf (#655)
- [2026-03-15] Old-format CIDs are warned but still silently dropped from the pool (#801): legacy CID warning made explicit (migration not supported), CID format contract documented in comment
- [2026-03-15] red-team.sh and export-attacks.py use Base Sepolia addresses labeled as mainnet (#794): replace Sepolia SWAP_ROUTER and V3_FACTORY with correct Base mainnet addresses; add Basescan source-link comments
- [2026-03-15] evo_run007_champion.push3 always returns fixed params regardless of staking (#791)
- [2026-03-15] evo_run007_champion.push3 note has same CI/DD inversion (#790)
- [2026-03-15] txnBot AGENTS.md ENVIRONMENT enum is stale (#784)
- [2026-03-20] Adoption milestone state ambiguity in MEMORY.md (#1068)
- [2026-03-20] OptimizerV3Push3 as IOptimizer always returns bear defaults — integration risk (#1063)
- [2026-03-20] implement evidence/resources and evidence/protocol logging (#1059): formulas/run-resources.toml (disk/RAM/API/CI sense formula, daily cron 06:00 UTC) and formulas/run-protocol.toml (TVL/fees/positions/rebalances sense formula, daily cron 07:00 UTC); evidence/resources/ and evidence/protocol/ directories; schemas in evidence/README.md
- [2026-03-21] Optimizer and OptimizerV3 lack _disableInitializers() in constructor (#1055)
- [2026-03-21] evolution formula must commit results via PR before closing (#1047)

View file

@ -1,6 +1,6 @@
# Technical Appendix
This document provides detailed technical analysis and implementation details for the KRAIKEN protocol's core innovations. For a high-level overview, see [README.md](README.md).
This document provides detailed technical analysis and implementation details for the KRAIKEN protocol's core innovations. For a high-level overview, see AGENTS.md.
## Asymmetric Slippage Strategy
@ -51,24 +51,11 @@ Double-overflow scenarios requiring >1000x compression would need:
- **Conclusion**: 1000x compression limit provides adequate protection against realistic scenarios
### Implementation Details
**FLOOR Position Calculation (Unified Formula):**
**FLOOR Position Calculation:**
```
floorTick = max(scarcityTick, mirrorTick, clampTick) toward KRK-cheap side
FLOOR_PRICE = VWAP_PRICE * (0.7 + CAPITAL_INEFFICIENCY)
```
Three signals determine the floor:
- **scarcityTick**: derived from `vwapX96` and ETH/supply ratio. Dominates when ETH is scarce.
- **mirrorTick**: `currentTick + |adjustedVwapTick - currentTick|` on KRK-cheap side. Reflects VWAP distance symmetrically. During sell pressure the mirror distance grows, resisting floor walkdown.
- **clampTick**: minimum distance from anchor edge. `anchorSpacing = 200 + (34 × 20 × AW / 100)` ticks.
**VWAP Mirror Defense:**
- During sell-heavy trading, the current tick drops but VWAP stays higher, so mirror distance *grows* — floor naturally resists being walked down.
- CI controls mirror distance through `getAdjustedVWAP(CI)` with no magic numbers. CI=0% is safest (proven zero effect on fee revenue).
**Directional VWAP Recording:**
- VWAP only records on ETH inflow (buys into the LM), preventing attackers from diluting VWAP with sells.
- `shouldRecordVWAP` compares `lastRecenterTick` to current tick to detect direction.
**Protection Mechanism:**
- VWAP provides "eternal memory" of historical trading activity
- Compression algorithm ensures memory persists even under extreme volume
@ -89,26 +76,26 @@ Three signals determine the floor:
- **Average Tax Rate**: Weighted average of all staking tax rates
- **Tax Rate Distribution**: Spread of tax rates across stakers
### OptimizerV3 Integration
**Direct 2D Binary Mapping (no intermediate score):**
OptimizerV3 reads `percentageStaked` and `averageTaxRate` from the Stake contract and maps them directly to one of two configurations:
- `staked ≤ 91%` → always **BEAR**: AS=30%, AW=100, CI=0, DD=0.3e18
- `staked > 91%`**BULL** if `deltaS³ × effIdx / 20 < 50`: AS=100%, AW=20, CI=0, DD=1e18
The binary step avoids the AW 40-80 kill zone where intermediate parameters are exploitable. Bull requires >91% staked with low enough tax; any decline snaps to bear instantly.
**Parameter Safety (proven via 1050-combo 4D sweep):**
- CI=0% always (zero effect on fee revenue, maximum protection)
- Fee revenue is parameter-independent (~1.5 ETH/cycle across all combos)
- Safety comes entirely from the AS×AW configuration
### Optimizer Integration
**Sentiment Analysis:**
```solidity
function getLiquidityParams() returns (
uint256 capitalInefficiency,
uint256 anchorShare,
uint24 anchorWidth,
uint256 discoveryDepth
) {
// Analyze staking data to determine optimal liquidity parameters
// Higher confidence (tax rates) → more aggressive positioning
// Lower confidence → more conservative positioning
}
```
### Economic Incentives
- **Tax Revenue**: Funds protocol operations and incentivizes participation
- **Staking Benefits**: Percentage ownership of total supply (rather than fixed token amounts)
- **Prediction Market**: Tax rates create market-based sentiment signals
- **Liquidity Optimization**: Sentiment data feeds into binary bear/bull parameter selection
- **Liquidity Optimization**: Sentiment data feeds into dynamic parameter adjustment
## Position Dependencies Technical Details
@ -143,7 +130,7 @@ The binary step avoids the AW 40-80 kill zone where intermediate parameters are
### Key Contracts
- **LiquidityManager.sol**: Core three-position strategy implementation
- **VWAPTracker.sol**: Historical price memory and compression algorithm
- **OptimizerV3.sol**: Sentiment-driven binary bear/bull parameter selection (UUPS upgradeable)
- **Optimizer.sol**: Sentiment analysis and parameter optimization
- **Stake.sol**: Harberger tax mechanism and sentiment data collection
### Analysis Tools
@ -152,6 +139,5 @@ The binary step avoids the AW 40-80 kill zone where intermediate parameters are
- **Scenario Visualization**: Tools for understanding liquidity dynamics
### Related Documentation
- **README.md**: Project overview
- **AGENTS.md**: Development and operational guidance
- **AGENTS.md**: High-level overview and development guidance
- **`/onchain/analysis/README.md`**: Detailed analysis tool usage

View file

@ -1,121 +0,0 @@
# Kraiken User Test Report v2
**Date:** 2026-02-14
**Branch:** `feat/ponder-lm-indexing`
**Stack:** Local fork (Anvil + Bootstrap + Ponder + Web-app + Landing)
## Executive Summary
Two test suites targeting distinct user funnels:
- **Test A (Passive Holder):** 9/9 passed ✅ — Landing page → Get KRK → Return value
- **Test B (Staker):** 7/12 passed (3 stake execution timeouts, 2 skipped) — Staking UI evaluation + docs audit
The tests surface **actionable UX friction** across both funnels. Core finding: **the passive holder funnel converts degens but loses newcomers and yield farmers.**
---
## Test A: Passive Holder Journey
### Tyler — Retail Degen ("sell me in 30 seconds")
| Metric | Result |
|--------|--------|
| Would buy | ✅ Yes |
| Would return | ❌ No |
| Friction | Landing page is one-time conversion, no repeat visit value |
**Key insight:** Degens convert on first visit but have no reason to come back. The landing page needs live stats or a reason to revisit.
### Alex — Newcomer ("what even is this?")
| Metric | Result |
|--------|--------|
| Would buy | ❌ No |
| Would return | ❌ No |
| Friction | No beginner explanation, no trust signals, no step-by-step guide, unclear value prop |
**Key insight:** Newcomers bounce. The landing page assumes crypto literacy. Needs: "What is this?" section, social proof, getting started guide.
### Sarah — Yield Farmer ("is this worth my time?")
| Metric | Result |
|--------|--------|
| Would buy | ❌ No |
| Would return | ❌ No |
| Friction | No APY/yield display, no risk indicators, no audit info, can't verify liquidity, no monitoring tools |
**Key insight:** Yield farmers need numbers upfront. Without APY estimates, risk metrics, or audit credentials, they won't invest time to understand the protocol.
---
## Test B: Staker Journey
### Priya — Institutional ("show me the docs")
**Steps completed:** Setup ✅, Documentation audit ✅, UI quality ✅, Stake execution ⏱ (timeout)
**Documentation Audit:**
- ✅ Documentation link visible
- ✅ Found 5 contract addresses — can verify on Etherscan
- ⚠ No copy button for addresses — minor friction
- ✅ Audit report accessible
- ⚠ Protocol parameters not displayed
- ⚠ No source code link (Codeberg/GitHub)
**UI Quality:**
- ✅ Found 39 precise numbers — good data quality
- ⚠ No indication if data is live or stale
- ✅ Input validation present
- ✅ Clear units on all values
### Marcus — Degen/MEV ("where's the edge?")
**Steps completed:** Setup ✅, Interface analysis ✅, Stake execution ⏱ (timeout)
### Sarah — Yield Farmer ("what are the risks?")
**Steps completed:** Setup ✅, Risk evaluation ✅, Stake execution ⏱ (timeout)
**Note:** Stake execution tests timeout because the test wallet interaction (fill amount → select tax → click stake) doesn't match the actual UI component structure. This is a test scaffolding issue, not a UX issue.
---
## Findings by Priority
### 🔴 Critical (Blocking Conversion)
1. **No APY/yield indicator on landing page** — Yield farmers and passive holders need a number to anchor on. Even "indicative rate" or "protocol performance" would help.
2. **No beginner explanation** — Newcomers have zero context. Need a "What is Kraiken?" section in plain English.
3. **Landing page is one-time only** — No reason to return after first visit. Protocol Health section exists but needs real data.
### 🟡 Important (Reduces Trust)
4. **No audit/security credentials visible** — Sarah and Priya both flagged this. Link to audit report, bug bounty, or security practices.
5. **No source code link** — Institutional users want to verify. Link to Codeberg repo.
6. **Data freshness unclear** — Priya noted: "No indication if data is live or stale." Add timestamps or "live" indicators.
7. **No copy button for contract addresses** — Minor but Priya flagged it for verification workflow.
### 🟢 Nice to Have
8. **Protocol parameters not displayed** — Advanced users want to see CI, AS, AW values.
9. **Step-by-step getting started guide on landing** — Exists on docs but not on landing page.
10. **Social proof / community links** — Tyler would convert faster with Discord/Twitter presence visible.
---
## Recommendations
### For Passive Holders (Landing Page)
1. Add **indicative APY** or protocol performance metric (even with disclaimer)
2. Add "What is Kraiken?" explainer in 2-3 sentences for newcomers
3. Make Protocol Health section show **live data** (holder count, ETH reserve, supply growth)
4. Add **trust signals**: audit link, team/project background, community links
5. Add "Last updated" timestamps to stats
### For Stakers (Web App)
1. Add **copy button** next to contract addresses
2. Add **data freshness indicator** (live dot, last updated timestamp)
3. Link to **source code** (Codeberg repo)
4. Display **protocol parameters** (current optimizer settings)
### For Both
1. The ProtocolStatsCard component was built (commit `a0aca16`) but needs integration into the landing page with real Ponder data
2. Bootstrap V3 swap is broken (sqrtPriceLimitX96=0 gives empty swap) — not blocking for mainnet but blocks local testing
---
## Test Infrastructure Notes
- **buyKrk helper** uses direct KRK transfer from deployer (Anvil #0) — V3 pool swap broken on local fork due to pool initialization at min tick
- **Stake execution tests** need UI component alignment — test expects `getByLabel(/staking amount/i)` but actual component may use different structure
- **Chain snapshots** work correctly for state isolation between personas
- **Test A is fully stable** and can be run as regression

View file

@ -9,7 +9,7 @@ The Kraiken protocol now includes a **version validation system** that ensures a
```
┌─────────────────────────────────────┐
│ Kraiken.sol │
│ uint256 public constant VERSION=2 │ ← Source of Truth
│ uint256 public constant VERSION=1 │ ← Source of Truth
└──────────────┬──────────────────────┘
│ read at startup
@ -42,8 +42,8 @@ contract Kraiken is ERC20, ERC20Permit {
* @notice Protocol version for data structure compatibility.
* Increment when making breaking changes to TAX_RATES, events, or core data structures.
*/
uint256 public constant VERSION = 2;
uint256 public constant VERSION = 1;
// ...
}
```
@ -58,9 +58,9 @@ contract Kraiken is ERC20, ERC20Permit {
**File:** `kraiken-lib/src/version.ts`
```typescript
export const KRAIKEN_LIB_VERSION = 2;
export const KRAIKEN_LIB_VERSION = 1;
export const COMPATIBLE_CONTRACT_VERSIONS = [1, 2];
export const COMPATIBLE_CONTRACT_VERSIONS = [1];
export function isCompatibleVersion(contractVersion: number): boolean {
return COMPATIBLE_CONTRACT_VERSIONS.includes(contractVersion);
@ -124,20 +124,28 @@ export function useVersionCheck() {
### 5. CI/CD Validation
**File:** `.woodpecker/release.yml` (version-check step)
**File:** `.github/workflows/validate-version.yml`
The Woodpecker release pipeline validates version consistency on tagged releases. The `version-check` step:
1. Builds kraiken-lib (including `sync-tax-rates.mjs`)
2. Runs an inline Node.js script that:
- Extracts `VERSION` from `Kraiken.sol`
- Extracts `KRAIKEN_LIB_VERSION` and `COMPATIBLE_CONTRACT_VERSIONS` from `kraiken-lib/src/version.ts`
- Fails if contract VERSION differs from lib VERSION
- Fails if contract VERSION is not in COMPATIBLE_CONTRACT_VERSIONS
```yaml
- name: Extract versions and validate
run: |
CONTRACT_VERSION=$(grep -oP 'VERSION\s*=\s*\K\d+' onchain/src/Kraiken.sol)
LIB_VERSION=$(grep -oP 'KRAIKEN_LIB_VERSION\s*=\s*\K\d+' kraiken-lib/src/version.ts)
COMPATIBLE=$(grep -oP 'COMPATIBLE_CONTRACT_VERSIONS\s*=\s*\[\K[^\]]+' kraiken-lib/src/version.ts)
if echo ",$COMPATIBLE," | grep -q ",$CONTRACT_VERSION,"; then
echo "✓ Version sync validated"
else
exit 1
fi
```
**Triggered on:** tag events (releases)
**Triggered on:**
- PRs touching `Kraiken.sol` or `version.ts`
- Pushes to `master`/`main`
**Prevents:**
- Releasing with incompatible versions
- Merging incompatible versions
- Deploying with stale kraiken-lib
## Workflows

View file

@ -1,61 +0,0 @@
# VISION.md — What "done" looks like
## What is harb
A DeFi protocol with a price-floor-backed token (KRK), governed by an AI-evolved optimizer that manages liquidity positions on Uniswap V3. Three user funnels: passive holders (buy and hold a floor-backed asset), stakers (leveraged directional exposure via Harberger tax as sentiment oracle), and competitors (snatch underpriced stakes for profit). The optimizer evolves through Push3 evolution and red-team adversarial testing.
## North star
Get live, learn from the market. The primary goal is having a real protocol with real users generating real data — not perfecting things in isolation. Everything else follows from that.
This project is AI-operated. Development, review, deployment, community support, analytics — all run by agents with minimal human escalation. The human sets direction and makes judgment calls. The machines handle execution, quality, and day-to-day operations. A high-quality project with a solid roadmap and growing community, delivered by an autonomous factory.
## Phase 1 — Quality gate & release pipeline
Before anything goes live, build confidence that the product works:
- **E2E quality gate**: automated tests covering every button, every page, desktop + mobile + all major browsers
- **Conversion funnel verification**: landing → Uniswap swap → staking app flow is smooth and measurable
- **Release pipeline**: fast, repeatable releases for frontend/backend updates. Contracts are immutable except the optimizer (upgradeable via UUPS).
- **Reusable for every release** — the quality gate runs on every deploy, not just launch
## Phase 2 — Coordinated launch
Not a soft launch. A planned, date-specific event:
- **Pre-launch**: create a pitch deck / PDF explaining the protocol to influencers — what KRK is, how to buy, how to stake, what the floor means
- **Influencer outreach**: coordinate with crypto influencers to amplify on the same date. They buy supply, stake, and market to their audience simultaneously.
- **Launch day**: deploy LiquidityManager, register token, create Uniswap pool. Coordinated influencer push creates initial volume → price action → organic discovery.
- **Goal**: broad base of holders from day one, not a slow trickle
## Phase 3 — Operations
Post-launch, the project needs sustained operations:
- **Analytics**: measure churn on landing page and staking page, track conversion funnel, user feedback loops
- **Fast iteration**: regular releases to fix issues, ship improvements based on user feedback
- **Influencer waves**: organize repeat coordinated pushes — influencers combine forces to create new bull cycles in the protocol
- **Community**: Discord (or similar) with:
- AI support bots trained on the protocol (help users swap, stake, understand the floor)
- Sentiment monitoring + regular community health reports
- Direct feedback channel to dev team
- **Optimizer governance**: release new evolved optimizers, eventually create a staker voting system for decentralized community-selected optimizer upgrades
- **txnBot**: automated on-chain operations — recenter triggers, protocol health monitoring, transaction execution
## What we're NOT building
- No governance token (KRK is the token, staking IS governance exposure)
- No cross-chain (Base only for now)
- No yield farming / liquidity mining incentives
- No centralized exchange listings (Uniswap is the market)
- No mobile app (responsive web only)
## What "launched" means (minimum)
1. Quality gate passes on landing + staking app (desktop + mobile)
2. Pitch deck exists and is reviewed
3. At least 3 influencers committed to launch day
4. LiquidityManager deployed on Base mainnet
5. KRK token registered, Uniswap pool created and funded
6. Analytics in place (basic funnel tracking)
7. Community channel open with at least one support bot

View file

@ -3,7 +3,6 @@
reverse_proxy webapp:5173
}
route /api/graphql* {
header Cache-Control "public, max-age=5"
uri strip_prefix /api
reverse_proxy ponder:42069
}
@ -18,9 +17,5 @@
uri strip_prefix /api/txn
reverse_proxy txn-bot:43069
}
route /analytics* {
uri strip_prefix /analytics
reverse_proxy umami:3000
}
reverse_proxy landing:5174
}

View file

@ -2,25 +2,7 @@
set -euo pipefail
MNEMONIC_FILE=/workspace/onchain/.secret.local
ANVIL_STATE_DIR=/home/foundry/.foundry/anvil/tmp
# Cleanup ALL old state snapshots on start + periodic cleanup in background
# Anvil fork mode generates thousands of JSON snapshots that fill disk fast
if [[ -d "$ANVIL_STATE_DIR" ]]; then
echo "[anvil] Cleaning up all state snapshots..."
rm -rf "$ANVIL_STATE_DIR"/* 2>/dev/null || true
fi
# Background cleanup: every 6 hours, delete snapshots older than 1 hour
(while true; do
sleep 21600
if [[ -d "$ANVIL_STATE_DIR" ]]; then
find "$ANVIL_STATE_DIR" -type f -name "*.json" -mmin +60 -delete 2>/dev/null || true
find "$ANVIL_STATE_DIR" -type d -empty -delete 2>/dev/null || true
fi
done) &
ANVIL_CMD=(anvil --fork-url "${FORK_URL:-https://sepolia.base.org}" --chain-id 31337 --block-time 1 --host 0.0.0.0 --port 8545 --threads 4 --timeout 2000 --retries 2 --fork-retry-backoff 100 --steps-tracing --no-storage-caching)
ANVIL_CMD=(anvil --fork-url "${FORK_URL:-https://sepolia.base.org}" --chain-id 31337 --block-time 1 --host 0.0.0.0 --port 8545 --threads 4 --timeout 2000 --retries 2 --fork-retry-backoff 100)
if [[ -f "$MNEMONIC_FILE" ]]; then
MNEMONIC="$(tr -d '\n\r' <"$MNEMONIC_FILE")"

View file

@ -26,48 +26,48 @@ if [[ -n "$GIT_BRANCH" ]]; then
fi
fi
fi
STATE_DIR=$ROOT_DIR/tmp/containers
STATE_DIR=$ROOT_DIR/tmp/podman
LOG_DIR=$STATE_DIR/logs
SETUP_LOG=$LOG_DIR/setup.log
CONTRACT_ENV=$STATE_DIR/contracts.env
TXNBOT_ENV=$STATE_DIR/txnBot.env
MNEMONIC_FILE=$ROOT_DIR/onchain/.secret.local
mkdir -p "$LOG_DIR"
: >"$SETUP_LOG"
# ── Configure shared bootstrap variables ──
ANVIL_RPC=${ANVIL_RPC:-"http://anvil:8545"}
CONTRACT_ENV=$STATE_DIR/contracts.env
FEE_DEST=0xf6a3eef9088A255c32b6aD2025f83E57291D9011
WETH=0x4200000000000000000000000000000000000006
SWAP_ROUTER=0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4
MAX_UINT=0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
# Derive NETWORK_NAME from FORK_URL if not explicitly set.
# Callers may override by exporting NETWORK_NAME before starting the stack.
# Chain ID 8453 = Base mainnet; anything else (including 84532 Base Sepolia) defaults to Sepolia fork.
if [[ -z "${NETWORK_NAME:-}" ]]; then
_fork_url="${FORK_URL:-}"
if [[ -n "$_fork_url" ]]; then
_chain_id=$(cast chain-id --rpc-url "$_fork_url" 2>/dev/null || echo "")
if [[ "$_chain_id" == "8453" ]]; then
NETWORK_NAME="BASE_MAINNET_LOCAL_FORK"
else
NETWORK_NAME="BASE_SEPOLIA_LOCAL_FORK"
fi
else
NETWORK_NAME="BASE_SEPOLIA_LOCAL_FORK"
fi
fi
DEFAULT_DEPLOYER_PK=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
DEFAULT_DEPLOYER_ADDR=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266
DEPLOYER_PK=${DEPLOYER_PK:-$DEFAULT_DEPLOYER_PK}
DEPLOYER_ADDR=${DEPLOYER_ADDR:-$DEFAULT_DEPLOYER_ADDR}
LOG_FILE=$SETUP_LOG
ONCHAIN_DIR=$ROOT_DIR/onchain
TXNBOT_FUND_VALUE=${TXNBOT_FUND_VALUE:-1ether}
# Source shared bootstrap functions
# shellcheck source=../scripts/bootstrap-common.sh
source "$ROOT_DIR/scripts/bootstrap-common.sh"
log() {
echo "[bootstrap] $*"
}
# ── Local-only helpers ─────────────────────────────────────────────────
BOOTSTRAP_START=$(date +%s%3N)
wait_for_rpc() {
for _ in {1..120}; do
if cast chain-id --rpc-url "$ANVIL_RPC" >/dev/null 2>&1; then
return 0
fi
sleep 1
done
log "Timed out waiting for Anvil at $ANVIL_RPC"
return 1
}
maybe_set_deployer_from_mnemonic() {
if [[ -n "$DEPLOYER_PK" && "$DEPLOYER_PK" != "$DEFAULT_DEPLOYER_PK" ]]; then
if [[ -n "$DEPLOYER_PK" && -n "$DEPLOYER_ADDR" ]]; then
return
fi
if [[ -f "$MNEMONIC_FILE" ]]; then
@ -76,10 +76,12 @@ maybe_set_deployer_from_mnemonic() {
if [[ -n "$mnemonic" ]]; then
pk="$(cast wallet private-key --mnemonic "$mnemonic" --mnemonic-derivation-path "m/44'/60'/0'/0/0")"
addr="$(cast wallet address --private-key "$pk")"
DEPLOYER_PK=${pk}
DEPLOYER_ADDR=${addr}
DEPLOYER_PK=${DEPLOYER_PK:-$pk}
DEPLOYER_ADDR=${DEPLOYER_ADDR:-$addr}
fi
fi
DEPLOYER_PK=${DEPLOYER_PK:-$DEFAULT_DEPLOYER_PK}
DEPLOYER_ADDR=${DEPLOYER_ADDR:-$DEFAULT_DEPLOYER_ADDR}
}
derive_txnbot_wallet() {
@ -89,37 +91,137 @@ derive_txnbot_wallet() {
if [[ -n "$mnemonic" ]]; then
TXNBOT_PRIVATE_KEY="$(cast wallet private-key --mnemonic "$mnemonic" --mnemonic-index 2)"
TXNBOT_ADDRESS="$(cast wallet address --private-key "$TXNBOT_PRIVATE_KEY")"
bootstrap_log "Derived txnBot wallet: $TXNBOT_ADDRESS (account index 2)"
log "Derived txnBot wallet: $TXNBOT_ADDRESS (account index 2)"
return
fi
fi
# Fallback to hardcoded Anvil account 1
TXNBOT_PRIVATE_KEY=$DEFAULT_TXNBOT_PK
TXNBOT_ADDRESS=$DEFAULT_TXNBOT_ADDR
bootstrap_log "Using default txnBot wallet: $TXNBOT_ADDRESS"
TXNBOT_PRIVATE_KEY=0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d
TXNBOT_ADDRESS=0x70997970C51812dc3A010C7d01b50e0d17dc79C8
log "Using default txnBot wallet: $TXNBOT_ADDRESS"
}
run_forge_script() {
log "Deploying contracts to fork"
pushd "$ROOT_DIR/onchain" >/dev/null
forge script script/DeployLocal.sol --fork-url "$ANVIL_RPC" --broadcast >>"$SETUP_LOG" 2>&1
popd >/dev/null
}
extract_addresses() {
local run_file
run_file="$(ls -t "$ROOT_DIR/onchain/broadcast/DeployLocal.sol"/*/run-latest.json 2>/dev/null | head -n1)"
if [[ -z "$run_file" ]]; then
log "Deployment artifact not found"
exit 1
fi
log "Using artifact ${run_file#$ROOT_DIR/}"
LIQUIDITY_MANAGER="$(jq -r '.transactions[] | select(.contractName=="LiquidityManager") | .contractAddress' "$run_file" | head -n1)"
KRAIKEN="$(jq -r '.transactions[] | select(.contractName=="Kraiken") | .contractAddress' "$run_file" | head -n1)"
STAKE="$(jq -r '.transactions[] | select(.contractName=="Stake") | .contractAddress' "$run_file" | head -n1)"
DEPLOY_BLOCK="$(jq -r '.receipts[0].blockNumber' "$run_file" | xargs printf "%d")"
if [[ -z "$LIQUIDITY_MANAGER" || "$LIQUIDITY_MANAGER" == "null" ]]; then
log "LiquidityManager address missing"
exit 1
fi
cat >"$CONTRACT_ENV" <<EOCONTRACTS
LIQUIDITY_MANAGER=$LIQUIDITY_MANAGER
KRAIKEN=$KRAIKEN
STAKE=$STAKE
EOCONTRACTS
}
fund_liquidity_manager() {
log "Funding LiquidityManager"
cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
"$LIQUIDITY_MANAGER" --value 0.1ether >>"$SETUP_LOG" 2>&1
}
grant_recenter_access() {
log "Granting recenter access"
cast rpc --rpc-url "$ANVIL_RPC" anvil_impersonateAccount "$FEE_DEST" >>"$SETUP_LOG" 2>&1
cast send --rpc-url "$ANVIL_RPC" --from "$FEE_DEST" --unlocked \
"$LIQUIDITY_MANAGER" "setRecenterAccess(address)" "$DEPLOYER_ADDR" >>"$SETUP_LOG" 2>&1
cast rpc --rpc-url "$ANVIL_RPC" anvil_stopImpersonatingAccount "$FEE_DEST" >>"$SETUP_LOG" 2>&1
if [[ -n "$TXNBOT_ADDRESS" ]]; then
cast rpc --rpc-url "$ANVIL_RPC" anvil_impersonateAccount "$FEE_DEST" >>"$SETUP_LOG" 2>&1
cast send --rpc-url "$ANVIL_RPC" --from "$FEE_DEST" --unlocked \
"$LIQUIDITY_MANAGER" "setRecenterAccess(address)" "$TXNBOT_ADDRESS" >>"$SETUP_LOG" 2>&1
cast rpc --rpc-url "$ANVIL_RPC" anvil_stopImpersonatingAccount "$FEE_DEST" >>"$SETUP_LOG" 2>&1
fi
}
call_recenter() {
local recenter_pk="$DEPLOYER_PK"
local recenter_addr="$DEPLOYER_ADDR"
if [[ -n "$TXNBOT_ADDRESS" ]]; then
recenter_pk="$TXNBOT_PRIVATE_KEY"
recenter_addr="$TXNBOT_ADDRESS"
fi
log "Calling recenter() via $recenter_addr"
cast send --rpc-url "$ANVIL_RPC" --private-key "$recenter_pk" \
"$LIQUIDITY_MANAGER" "recenter()" >>"$SETUP_LOG" 2>&1
}
seed_application_state() {
log "Wrapping ETH to WETH"
cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
"$WETH" "deposit()" --value 0.02ether >>"$SETUP_LOG" 2>&1
log "Approving router"
cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
"$WETH" "approve(address,uint256)" "$SWAP_ROUTER" "$MAX_UINT" >>"$SETUP_LOG" 2>&1
log "Executing initial KRK swap"
cast send --legacy --gas-limit 300000 --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
"$SWAP_ROUTER" "exactInputSingle((address,address,uint24,address,uint256,uint256,uint160))" \
"($WETH,$KRAIKEN,10000,$DEPLOYER_ADDR,10000000000000000,0,0)" >>"$SETUP_LOG" 2>&1
}
prime_chain() {
log "Pre-mining 200 blocks (2x ring buffer warmup)..."
# Try batch mine first (0xc8 = 200 blocks = 2x MINIMUM_BLOCKS_FOR_RING_BUFFER, 0x1 = 1 second interval)
if cast rpc --rpc-url "$ANVIL_RPC" anvil_mine "0xc8" "0x1" >/dev/null 2>&1; then
log "Used batch mining"
else
log "Batch mining failed, using individual evm_mine calls"
for i in {1..200}; do
cast rpc --rpc-url "$ANVIL_RPC" evm_mine >/dev/null 2>&1 || true
if ((i % 50 == 0)); then
log "Mined $i blocks..."
fi
done
fi
log "Pre-mining complete"
}
write_deployments_json() {
cat >"$ROOT_DIR/onchain/deployments-local.json" <<EODEPLOYMENTS
{
"contracts": {
"Kraiken": "$KRAIKEN",
"Stake": "$STAKE",
"LiquidityManager": "$LIQUIDITY_MANAGER"
}
}
EODEPLOYMENTS
}
write_ponder_env() {
cat >"$ROOT_DIR/services/ponder/.env.local" <<EOPONDER
PONDER_NETWORK=$NETWORK_NAME
PONDER_NETWORK=BASE_SEPOLIA_LOCAL_FORK
KRAIKEN_ADDRESS=$KRAIKEN
STAKE_ADDRESS=$STAKE
LM_ADDRESS=$LIQUIDITY_MANAGER
POOL_ADDRESS=$POOL_ADDRESS
MINIMUM_BLOCKS_FOR_RINGBUFFER=0
START_BLOCK=$DEPLOY_BLOCK
PONDER_RPC_URL_${NETWORK_NAME}=$ANVIL_RPC
PONDER_RPC_URL_BASE_SEPOLIA_LOCAL_FORK=$ANVIL_RPC
DATABASE_URL=postgresql://ponder:ponder_local@postgres:5432/ponder_local
DATABASE_SCHEMA=ponder_local_${DEPLOY_BLOCK}
EOPONDER
}
write_txn_bot_env() {
local txnbot_env=$STATE_DIR/txnBot.env
local provider_url=${TXNBOT_PROVIDER_URL:-$ANVIL_RPC}
local graphql_endpoint=${TXNBOT_GRAPHQL_ENDPOINT:-http://ponder:42069/graphql}
cat >"$txnbot_env" <<EOTXNBOT
ENVIRONMENT=$NETWORK_NAME
cat >"$TXNBOT_ENV" <<EOTXNBOT
ENVIRONMENT=BASE_SEPOLIA_LOCAL_FORK
PROVIDER_URL=$provider_url
PRIVATE_KEY=$TXNBOT_PRIVATE_KEY
LM_CONTRACT_ADDRESS=$LIQUIDITY_MANAGER
@ -130,62 +232,29 @@ PORT=43069
EOTXNBOT
}
prime_chain() {
bootstrap_log "Pre-mining 5 blocks (minimal warmup for fast Ponder sync)..."
if cast rpc --rpc-url "$ANVIL_RPC" anvil_mine "0x5" "0x1" >/dev/null 2>&1; then
bootstrap_log "Mined 5 blocks"
else
bootstrap_log "Batch mining failed, using individual evm_mine calls"
for i in {1..5}; do
cast rpc --rpc-url "$ANVIL_RPC" evm_mine >/dev/null 2>&1 || true
done
fund_txn_bot_wallet() {
if [[ -z "$TXNBOT_ADDRESS" ]]; then
return
fi
bootstrap_log "Pre-mining complete"
log "Funding txnBot wallet $TXNBOT_ADDRESS"
cast send --rpc-url "$ANVIL_RPC" --private-key "$DEPLOYER_PK" \
"$TXNBOT_ADDRESS" --value "$TXNBOT_FUND_VALUE" >>"$SETUP_LOG" 2>&1 || true
local wei hex
wei="$(cast --to-unit "$TXNBOT_FUND_VALUE" wei)"
hex="$(cast --to-hex "$wei")"
cast rpc --rpc-url "$ANVIL_RPC" anvil_setBalance "$TXNBOT_ADDRESS" "$hex" >>"$SETUP_LOG" 2>&1
}
# ── Main ───────────────────────────────────────────────────────────────
main() {
local start_time
start_time=$(date +%s%3N)
bootstrap_log "Waiting for Anvil"
log "Waiting for Anvil"
wait_for_rpc
# Idempotency: if deployments-local.json exists and contracts have code,
# bootstrap already ran against this Anvil instance — skip.
local deploy_file="$ONCHAIN_DIR/deployments-local.json"
if [[ -f "$deploy_file" ]]; then
local krk_addr
krk_addr=$(jq -r '.contracts.Kraiken // empty' "$deploy_file" 2>/dev/null || true)
if [[ -n "$krk_addr" ]]; then
local code
code=$(cast call --rpc-url "$ANVIL_RPC" "$krk_addr" "decimals()(uint8)" 2>/dev/null || true)
if [[ -n "$code" && "$code" != "0x" ]]; then
bootstrap_log "Already bootstrapped (Kraiken at $krk_addr responds) — skipping"
return 0
fi
fi
fi
maybe_set_deployer_from_mnemonic
# On forked networks, well-known addresses (Anvil mnemonic accounts) may
# have code (e.g. ERC-4337 Account Abstraction proxies on Base Sepolia).
# The feeDestination lock in LiquidityManager treats any address with code
# as a contract and locks permanently. Strip code so they behave as EOAs.
bootstrap_log "Clearing code from deployer + feeDest (fork safety)"
cast rpc --rpc-url "$ANVIL_RPC" anvil_setCode "$DEPLOYER_ADDR" "0x" 2>/dev/null || true
# 0xf6a3... carries 171 bytes of code on Base mainnet and may also carry code on Base
# Sepolia. Clear it before setFeeDestination is called so LiquidityManager does not
# permanently lock feeDestinationLocked (#760).
cast rpc --rpc-url "$ANVIL_RPC" anvil_setCode "$FEE_DEST" "0x" 2>/dev/null || true
derive_txnbot_wallet
run_forge_script
extract_addresses
write_contracts_env
bootstrap_vwap
fund_liquidity_manager
grant_recenter_access
call_recenter
seed_application_state
write_deployments_json
write_ponder_env
@ -194,17 +263,14 @@ main() {
prime_chain &
local prime_pid=$!
wait "$prime_pid"
local end_time
end_time=$(date +%s%3N)
local elapsed_ms=$((end_time - start_time))
local elapsed_sec
BOOTSTRAP_END=$(date +%s%3N)
elapsed_ms=$((BOOTSTRAP_END - BOOTSTRAP_START))
elapsed_sec=$(awk -v ms="$elapsed_ms" 'BEGIN { printf "%.3f", ms/1000 }')
bootstrap_log "Bootstrap complete in ${elapsed_sec}s"
bootstrap_log "Kraiken: $KRAIKEN"
bootstrap_log "Stake: $STAKE"
bootstrap_log "LiquidityManager: $LIQUIDITY_MANAGER"
bootstrap_log "txnBot: $TXNBOT_ADDRESS"
log "Bootstrap complete in ${elapsed_sec}s"
log "Kraiken: $KRAIKEN"
log "Stake: $STAKE"
log "LiquidityManager: $LIQUIDITY_MANAGER"
log "txnBot: $TXNBOT_ADDRESS"
}
main "$@"

View file

@ -1,59 +0,0 @@
#!/usr/bin/env bash
# Shared helpers for service entrypoints (local dev mode).
# Source this file in each entrypoint script.
# Checkout a git branch if GIT_BRANCH is set.
# Args: $1 = root directory, $2 = log prefix
entrypoint_checkout_branch() {
local root_dir="$1"
local prefix="$2"
local git_branch="${GIT_BRANCH:-}"
if [[ -z "$git_branch" ]]; then
return
fi
cd "$root_dir"
git config --global --add safe.directory "$root_dir" 2>/dev/null || true
local current
current=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
if [[ "$current" != "$git_branch" ]]; then
echo "[$prefix] Switching to branch: $git_branch"
if git rev-parse --verify "$git_branch" >/dev/null 2>&1; then
git checkout "$git_branch" 2>/dev/null || echo "[$prefix] WARNING: Could not checkout $git_branch"
else
git fetch origin "$git_branch" 2>/dev/null || true
git checkout "$git_branch" 2>/dev/null || echo "[$prefix] WARNING: Could not checkout $git_branch"
fi
fi
}
# Validate kraiken-lib dist exists.
# Args: $1 = root directory, $2 = log prefix
entrypoint_require_kraiken_lib() {
local root_dir="$1"
local prefix="$2"
local required_dist="$root_dir/kraiken-lib/dist/index.js"
if [[ ! -f "$required_dist" ]]; then
echo "[$prefix] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
}
# Install node_modules if needed (named volume may be empty).
# Args: $1 = log prefix
entrypoint_install_deps() {
local prefix="$1"
if [[ ! -d node_modules/.bin ]]; then
echo "[$prefix] Installing dependencies..."
npm ci --loglevel error && npm cache clean --force 2>&1 || {
echo "[$prefix] npm ci failed, trying npm install"
npm install --no-save --loglevel error && npm cache clean --force
}
else
echo "[$prefix] Using cached node_modules from volume"
fi
}

View file

@ -1,16 +0,0 @@
#!/bin/bash
# Creates the umami database and user if they don't already exist.
# Mounted as a postgres init script via docker-compose volumes.
set -e
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'umami') THEN
CREATE ROLE umami WITH LOGIN PASSWORD 'umami_local';
END IF;
END
\$\$;
SELECT 'CREATE DATABASE umami OWNER umami'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'umami')\gexec
EOSQL

View file

@ -1,5 +0,0 @@
#!/usr/bin/env bash
# Minimal CI entrypoint for landing — just starts the dev server.
set -euo pipefail
cd /app/landing
exec npm run dev -- --host 0.0.0.0 --port 5174

View file

@ -0,0 +1,50 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[landing-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[landing-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[landing-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
LANDING_DIR=$ROOT_DIR/landing
REQUIRED_DIST="$ROOT_DIR/kraiken-lib/dist/index.js"
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[landing-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
cd "$LANDING_DIR"
DEPS_MARKER="/tmp/.landing-deps-installed"
if [[ ! -d node_modules || ! -f "$DEPS_MARKER" ]]; then
echo "[landing-entrypoint] Installing dependencies..."
npm install --no-save --loglevel error 2>&1 || {
echo "[landing-entrypoint] npm install failed, trying with --force"
npm install --force --no-save --loglevel error
}
touch "$DEPS_MARKER" || true
else
echo "[landing-entrypoint] Using cached node_modules"
fi
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
export HOST=0.0.0.0
export PORT=${PORT:-5174}
exec npm run dev -- --host 0.0.0.0 --port 5174

View file

@ -1,32 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR=/workspace
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
entrypoint_checkout_branch "$ROOT_DIR" "landing-entrypoint"
entrypoint_require_kraiken_lib "$ROOT_DIR" "landing-entrypoint"
cd "$ROOT_DIR/landing"
entrypoint_install_deps "landing-entrypoint"
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
export HOST=0.0.0.0
export PORT=${PORT:-5174}
# Source contract addresses from bootstrap output
CONTRACTS_ENV="$ROOT_DIR/tmp/containers/contracts.env"
if [[ -f "$CONTRACTS_ENV" ]]; then
source "$CONTRACTS_ENV"
export VITE_KRAIKEN_ADDRESS="${KRAIKEN:-}"
export VITE_STAKE_ADDRESS="${STAKE:-}"
echo "[landing-entrypoint] Contract addresses loaded: KRK=${KRAIKEN:-unset} STAKE=${STAKE:-unset}"
fi
export VITE_UMAMI_URL="${VITE_UMAMI_URL:-}"
export VITE_UMAMI_WEBSITE_ID="${VITE_UMAMI_WEBSITE_ID:-}"
exec npm run dev -- --host 0.0.0.0 --port 5174

View file

@ -5,10 +5,7 @@ RUN apk add --no-cache \
git \
bash \
postgresql-client \
wget \
python3 \
make \
g++
wget
USER node
WORKDIR /workspace

View file

@ -1,43 +1,28 @@
#!/usr/bin/env bash
set -euo pipefail
if [[ "${CI:-}" == "true" ]]; then
# ── CI path ────────────────────────────────────────────────────────
cd /app/services/ponder
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
echo "[ponder-ci] Starting Ponder indexer..."
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
: "${DATABASE_URL:?DATABASE_URL is required}"
: "${PONDER_RPC_URL_1:?PONDER_RPC_URL_1 is required}"
export PONDER_RPC_TIMEOUT=${PONDER_RPC_TIMEOUT:-20000}
export HOST=${HOST:-0.0.0.0}
export PORT=${PORT:-42069}
cat > .env.local <<EOF
DATABASE_URL=${DATABASE_URL}
PONDER_RPC_URL_1=${PONDER_RPC_URL_1}
DATABASE_SCHEMA=${DATABASE_SCHEMA:-ponder_ci}
START_BLOCK=${START_BLOCK:-0}
EOF
echo "[ponder-ci] Environment configured:"
echo " DATABASE_URL: ${DATABASE_URL}"
echo " PONDER_RPC_URL_1: ${PONDER_RPC_URL_1}"
echo " START_BLOCK: ${START_BLOCK:-0}"
exec npm run dev
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[ponder-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[ponder-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[ponder-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
# ── Local dev path ─────────────────────────────────────────────────
ROOT_DIR=/workspace
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
entrypoint_checkout_branch "$ROOT_DIR" "ponder-entrypoint"
CONTRACT_ENV=$ROOT_DIR/tmp/containers/contracts.env
CONTRACT_ENV=$ROOT_DIR/tmp/podman/contracts.env
PONDER_WORKDIR=$ROOT_DIR/services/ponder
while [[ ! -f "$CONTRACT_ENV" ]]; do
@ -74,8 +59,23 @@ if [[ -n "$START_BLOCK" ]]; then
fi
fi
entrypoint_require_kraiken_lib "$ROOT_DIR" "ponder-entrypoint"
entrypoint_install_deps "ponder-entrypoint"
REQUIRED_DIST="$ROOT_DIR/kraiken-lib/dist/index.js"
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[ponder-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
DEPS_MARKER="/tmp/.ponder-deps-installed"
if [[ ! -d node_modules || ! -f "$DEPS_MARKER" ]]; then
echo "[ponder-entrypoint] Installing dependencies..."
npm install --no-save --loglevel error 2>&1 || {
echo "[ponder-entrypoint] npm install failed, trying with --force"
npm install --force --no-save --loglevel error
}
touch "$DEPS_MARKER" || true
else
echo "[ponder-entrypoint] Using cached node_modules"
fi
# Load and export all environment variables from .env.local
if [[ -f .env.local ]]; then
@ -88,6 +88,5 @@ fi
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
export HOST=0.0.0.0
export PORT=${PORT:-42069}
export PONDER_RPC_TIMEOUT=${PONDER_RPC_TIMEOUT:-20000}
exec npm run dev

View file

@ -0,0 +1,56 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[txn-bot-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[txn-bot-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[txn-bot-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
TXNBOT_ENV_FILE=$ROOT_DIR/tmp/podman/txnBot.env
BOT_DIR=$ROOT_DIR/services/txnBot
REQUIRED_DIST=$ROOT_DIR/kraiken-lib/dist/index.js
while [[ ! -f "$TXNBOT_ENV_FILE" ]]; do
echo "[txn-bot-entrypoint] waiting for env file"
sleep 2
done
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[txn-bot-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
cd "$BOT_DIR"
DEPS_MARKER="/tmp/.txnbot-deps-installed"
if [[ ! -d node_modules || ! -f "$DEPS_MARKER" ]]; then
echo "[txn-bot-entrypoint] Installing txn-bot dependencies..."
npm install --no-save --loglevel error 2>&1 || {
echo "[txn-bot-entrypoint] npm install failed, trying with --force"
npm install --force --no-save --loglevel error
}
touch "$DEPS_MARKER" || true
else
echo "[txn-bot-entrypoint] Using cached node_modules"
fi
echo "[txn-bot-entrypoint] Building TypeScript..."
npm run build
export TXN_BOT_ENV_FILE="$TXNBOT_ENV_FILE"
exec npm run start

View file

@ -1,62 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [[ "${CI:-}" == "true" ]]; then
# ── CI path ────────────────────────────────────────────────────────
echo "[txnbot-ci] Starting Transaction Bot..."
: "${TXNBOT_PRIVATE_KEY:?TXNBOT_PRIVATE_KEY is required}"
: "${RPC_URL:?RPC_URL is required}"
: "${KRAIKEN_ADDRESS:?KRAIKEN_ADDRESS is required}"
: "${STAKE_ADDRESS:?STAKE_ADDRESS is required}"
: "${LIQUIDITY_MANAGER_ADDRESS:?LIQUIDITY_MANAGER_ADDRESS is required}"
cat > /tmp/txnBot.env <<EOF
TXNBOT_PRIVATE_KEY=${TXNBOT_PRIVATE_KEY}
RPC_URL=${RPC_URL}
KRAIKEN_ADDRESS=${KRAIKEN_ADDRESS}
STAKE_ADDRESS=${STAKE_ADDRESS}
LIQUIDITY_MANAGER_ADDRESS=${LIQUIDITY_MANAGER_ADDRESS}
POOL_ADDRESS=${POOL_ADDRESS:-}
WETH_ADDRESS=${WETH_ADDRESS:-0x4200000000000000000000000000000000000006}
EOF
export TXN_BOT_ENV_FILE=/tmp/txnBot.env
echo "[txnbot-ci] Environment configured:"
echo " RPC_URL: ${RPC_URL}"
echo " KRAIKEN_ADDRESS: ${KRAIKEN_ADDRESS}"
echo "[txnbot-ci] Building TypeScript..."
npm run build
exec npm run start
fi
# ── Local dev path ─────────────────────────────────────────────────
ROOT_DIR=/workspace
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
entrypoint_checkout_branch "$ROOT_DIR" "txnbot-entrypoint"
TXNBOT_ENV_FILE=$ROOT_DIR/tmp/containers/txnBot.env
BOT_DIR=$ROOT_DIR/services/txnBot
while [[ ! -f "$TXNBOT_ENV_FILE" ]]; do
echo "[txnbot-entrypoint] waiting for env file"
sleep 2
done
entrypoint_require_kraiken_lib "$ROOT_DIR" "txnbot-entrypoint"
cd "$BOT_DIR"
entrypoint_install_deps "txnbot-entrypoint"
echo "[txnbot-entrypoint] Building TypeScript..."
npm run build
export TXN_BOT_ENV_FILE="$TXNBOT_ENV_FILE"
exec npm run start

View file

@ -0,0 +1,68 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR=/workspace
GIT_BRANCH="${GIT_BRANCH:-}"
# Checkout branch if specified
if [[ -n "$GIT_BRANCH" ]]; then
cd "$ROOT_DIR"
git config --global --add safe.directory "$ROOT_DIR" 2>/dev/null || true
CURRENT=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
if [[ "$CURRENT" != "$GIT_BRANCH" ]]; then
echo "[webapp-entrypoint] Switching to branch: $GIT_BRANCH"
# Try local branch first, then remote
if git rev-parse --verify "$GIT_BRANCH" >/dev/null 2>&1; then
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[webapp-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
else
git fetch origin "$GIT_BRANCH" 2>/dev/null || true
git checkout "$GIT_BRANCH" 2>/dev/null || echo "[webapp-entrypoint] WARNING: Could not checkout $GIT_BRANCH"
fi
fi
fi
CONTRACT_ENV=$ROOT_DIR/tmp/podman/contracts.env
APP_DIR=$ROOT_DIR/web-app
SWAP_ROUTER=0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4
while [[ ! -f "$CONTRACT_ENV" ]]; do
echo "[frontend-entrypoint] waiting for contracts env"
sleep 2
done
REQUIRED_DIST="$ROOT_DIR/kraiken-lib/dist/index.js"
if [[ ! -f "$REQUIRED_DIST" ]]; then
echo "[frontend-entrypoint] ERROR: Run ./scripts/build-kraiken-lib.sh before starting containers" >&2
exit 1
fi
# shellcheck disable=SC1090
source "$CONTRACT_ENV"
cd "$APP_DIR"
DEPS_MARKER="/tmp/.webapp-deps-installed"
if [[ ! -d node_modules || ! -f "$DEPS_MARKER" ]]; then
echo "[frontend-entrypoint] Installing dependencies..."
npm install --no-save --loglevel error 2>&1 || {
echo "[frontend-entrypoint] npm install failed, trying with --force"
npm install --force --no-save --loglevel error
}
touch "$DEPS_MARKER" || true
else
echo "[frontend-entrypoint] Using cached node_modules"
fi
export VITE_DEFAULT_CHAIN_ID=${VITE_DEFAULT_CHAIN_ID:-31337}
export VITE_LOCAL_RPC_URL=${VITE_LOCAL_RPC_URL:-/api/rpc}
export VITE_LOCAL_RPC_PROXY_TARGET=${VITE_LOCAL_RPC_PROXY_TARGET:-http://anvil:8545}
export VITE_LOCAL_GRAPHQL_PROXY_TARGET=${VITE_LOCAL_GRAPHQL_PROXY_TARGET:-http://ponder:42069}
export VITE_LOCAL_TXN_PROXY_TARGET=${VITE_LOCAL_TXN_PROXY_TARGET:-http://txn-bot:43069}
export VITE_KRAIKEN_ADDRESS=$KRAIKEN
export VITE_STAKE_ADDRESS=$STAKE
export VITE_SWAP_ROUTER=$SWAP_ROUTER
export VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK=${VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK:-/api/graphql}
export VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK=${VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK:-/api/txn}
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/

View file

@ -1,89 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [[ "${CI:-}" == "true" ]]; then
# ── CI path ────────────────────────────────────────────────────────
# NOTE: this block is NOT executed by the .woodpecker/e2e.yml pipeline.
# Woodpecker runs the webapp image as a service with a `commands:` block,
# which replaces the Docker ENTRYPOINT entirely — dumb-init and this script
# are bypassed. The e2e.yml commands block sources contracts.env and starts
# `npm run dev` inline (see .woodpecker/e2e.yml, webapp service, ~line 129).
#
# This path fires only for manual invocations, e.g.:
# docker run -e CI=true \
# -e VITE_KRAIKEN_ADDRESS=0x... \
# -e VITE_STAKE_ADDRESS=0x... \
# webapp-ci
#
# VITE_KRAIKEN_ADDRESS and VITE_STAKE_ADDRESS must be supplied by the caller;
# they are not derived from contracts.env here.
cd /app/web-app
echo "[webapp-ci] Starting Web App..."
: "${VITE_KRAIKEN_ADDRESS:?VITE_KRAIKEN_ADDRESS must be supplied by the caller (not sourced from contracts.env in this path)}"
: "${VITE_STAKE_ADDRESS:?VITE_STAKE_ADDRESS must be supplied by the caller (not sourced from contracts.env in this path)}"
# Disable Vue DevTools in CI to avoid path resolution issues
export CI=true
export VITE_DEFAULT_CHAIN_ID=${VITE_DEFAULT_CHAIN_ID:-31337}
export VITE_LOCAL_RPC_URL=${VITE_LOCAL_RPC_URL:-/api/rpc}
export VITE_LOCAL_RPC_PROXY_TARGET=${VITE_LOCAL_RPC_PROXY_TARGET:-http://anvil:8545}
export VITE_LOCAL_GRAPHQL_PROXY_TARGET=${VITE_LOCAL_GRAPHQL_PROXY_TARGET:-http://ponder:42069}
export VITE_LOCAL_TXN_PROXY_TARGET=${VITE_LOCAL_TXN_PROXY_TARGET:-http://txn-bot:43069}
export VITE_SWAP_ROUTER=${VITE_SWAP_ROUTER:-0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4}
export VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK=${VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK:-/api/graphql}
export VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK=${VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK:-/api/txn}
export VITE_UMAMI_URL="${VITE_UMAMI_URL:-}"
export VITE_UMAMI_WEBSITE_ID="${VITE_UMAMI_WEBSITE_ID:-}"
echo "[webapp-ci] Environment configured:"
echo " VITE_KRAIKEN_ADDRESS: ${VITE_KRAIKEN_ADDRESS}"
echo " VITE_STAKE_ADDRESS: ${VITE_STAKE_ADDRESS}"
echo " VITE_DEFAULT_CHAIN_ID: ${VITE_DEFAULT_CHAIN_ID}"
exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/
fi
# ── Local dev path ─────────────────────────────────────────────────
ROOT_DIR=/workspace
# Default is the Sepolia SwapRouter; override via SWAP_ROUTER env var for other networks.
SWAP_ROUTER=${SWAP_ROUTER:-0x94cC0AaC535CCDB3C01d6787D6413C739ae12bc4}
# shellcheck source=entrypoint-common.sh
source "$ROOT_DIR/containers/entrypoint-common.sh"
entrypoint_checkout_branch "$ROOT_DIR" "webapp-entrypoint"
CONTRACT_ENV=$ROOT_DIR/tmp/containers/contracts.env
APP_DIR=$ROOT_DIR/web-app
while [[ ! -f "$CONTRACT_ENV" ]]; do
echo "[webapp-entrypoint] waiting for contracts env"
sleep 2
done
entrypoint_require_kraiken_lib "$ROOT_DIR" "webapp-entrypoint"
# shellcheck disable=SC1090
source "$CONTRACT_ENV"
cd "$APP_DIR"
entrypoint_install_deps "webapp-entrypoint"
export VITE_DEFAULT_CHAIN_ID=${VITE_DEFAULT_CHAIN_ID:-31337}
export VITE_LOCAL_RPC_URL=${VITE_LOCAL_RPC_URL:-/api/rpc}
export VITE_LOCAL_RPC_PROXY_TARGET=${VITE_LOCAL_RPC_PROXY_TARGET:-http://anvil:8545}
export VITE_LOCAL_GRAPHQL_PROXY_TARGET=${VITE_LOCAL_GRAPHQL_PROXY_TARGET:-http://ponder:42069}
export VITE_LOCAL_TXN_PROXY_TARGET=${VITE_LOCAL_TXN_PROXY_TARGET:-http://txn-bot:43069}
export VITE_KRAIKEN_ADDRESS=$KRAIKEN
export VITE_STAKE_ADDRESS=$STAKE
export VITE_SWAP_ROUTER=$SWAP_ROUTER
export VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK=${VITE_PONDER_BASE_SEPOLIA_LOCAL_FORK:-/api/graphql}
export VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK=${VITE_TXNBOT_BASE_SEPOLIA_LOCAL_FORK:-/api/txn}
export CHOKIDAR_USEPOLLING=${CHOKIDAR_USEPOLLING:-1}
export VITE_UMAMI_URL="${VITE_UMAMI_URL:-}"
export VITE_UMAMI_WEBSITE_ID="${VITE_UMAMI_WEBSITE_ID:-}"
exec npm run dev -- --host 0.0.0.0 --port 5173 --base /app/

View file

@ -3,7 +3,7 @@
FROM mcr.microsoft.com/playwright:v1.56.0-jammy
LABEL org.opencontainers.image.source="https://codeberg.org/johba/harb-ci"
LABEL org.opencontainers.image.description="Playwright + Docker image for Harb Stack end-to-end CI"
LABEL org.opencontainers.image.description="Playwright + Podman image for Harb Stack end-to-end CI"
ENV DEBIAN_FRONTEND=noninteractive \
PNPM_HOME=/root/.local/share/pnpm \
@ -13,13 +13,19 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && \
apt-get install -y --no-install-recommends \
podman \
slirp4netns \
uidmap \
iptables \
git \
ca-certificates \
python3-pip \
jq \
curl && \
rm -rf /var/lib/apt/lists/*
RUN corepack enable && \
RUN python3 -m pip install --no-cache-dir podman-compose && \
corepack enable && \
corepack prepare pnpm@8.15.4 --activate && \
corepack prepare yarn@1.22.19 --activate

View file

@ -1,121 +0,0 @@
# Unified CI image for Harb services (ponder, webapp, landing, txnBot).
# Parameterized via build args — eliminates per-service Dockerfile duplication.
#
# Usage:
# docker build -f docker/Dockerfile.service-ci \
# --build-arg SERVICE_DIR=services/ponder \
# --build-arg SERVICE_PORT=42069 \
# --build-arg ENTRYPOINT_SCRIPT=containers/ponder-entrypoint.sh \
# -t ponder-ci .
# ── Build args (declared early for builder stage) ──────────────────
ARG SERVICE_DIR
ARG NPM_INSTALL_CMD=ci
# ── Builder stage ──────────────────────────────────────────────────
FROM node:20-alpine AS builder
RUN apk add --no-cache git bash
WORKDIR /app
# Copy root package files
COPY package.json package-lock.json ./
# Copy kraiken-lib package files
COPY kraiken-lib/package.json kraiken-lib/package-lock.json ./kraiken-lib/
# Copy ABI files needed by kraiken-lib
COPY onchain/out/Kraiken.sol/Kraiken.json ./onchain/out/Kraiken.sol/
COPY onchain/out/Stake.sol/Stake.json ./onchain/out/Stake.sol/
# Copy Stake.sol for sync-tax-rates + the script itself
COPY onchain/src/Stake.sol ./onchain/src/
COPY scripts/sync-tax-rates.mjs ./scripts/
# Install kraiken-lib dependencies, run sync-tax-rates, and build
WORKDIR /app/kraiken-lib
RUN npm ci --ignore-scripts
COPY kraiken-lib/ ./
RUN node ../scripts/sync-tax-rates.mjs && ./node_modules/.bin/tsc
# Install service dependencies
ARG SERVICE_DIR
ARG NPM_INSTALL_CMD
WORKDIR /app/${SERVICE_DIR}
COPY ${SERVICE_DIR}/package.json ./
# Use glob pattern to optionally copy package-lock.json (txnBot has none)
COPY ${SERVICE_DIR}/package-lock.jso[n] ./
RUN if [ "$NPM_INSTALL_CMD" = "install" ]; then npm install; else npm ci; fi
# Copy service source
COPY ${SERVICE_DIR}/ ./
# Ensure root node_modules exists (may be populated by workspace hoisting, or empty)
RUN mkdir -p /app/node_modules
# Copy onchain deployment artifacts (glob handles missing files)
WORKDIR /app
COPY onchain/deployments*.jso[n] ./onchain/
# ── Runtime stage ──────────────────────────────────────────────────
FROM node:20-alpine
RUN apk add --no-cache dumb-init wget bash
WORKDIR /app
# Copy kraiken-lib (src for Vite alias, dist for runtime, package.json for resolution)
COPY --from=builder /app/kraiken-lib/src ./kraiken-lib/src
COPY --from=builder /app/kraiken-lib/dist ./kraiken-lib/dist
COPY --from=builder /app/kraiken-lib/package.json ./kraiken-lib/
# Copy service with all node_modules
ARG SERVICE_DIR
COPY --from=builder /app/${SERVICE_DIR} ./${SERVICE_DIR}
# Copy root node_modules (workspace-hoisted deps for landing/webapp, empty for others)
COPY --from=builder /app/node_modules ./node_modules
# Copy onchain artifacts
COPY --from=builder /app/onchain ./onchain
# Create placeholder deployments-local.json if not present
RUN test -f /app/onchain/deployments-local.json || \
(mkdir -p /app/onchain && echo '{"contracts":{}}' > /app/onchain/deployments-local.json)
# Conditionally create symlinks for Vite path resolution (webapp only)
ARG NEEDS_SYMLINKS=false
RUN if [ "$NEEDS_SYMLINKS" = "true" ]; then \
ln -sf /app/web-app /web-app && \
ln -sf /app/kraiken-lib /kraiken-lib && \
ln -sf /app/onchain /onchain; \
fi
# Copy entrypoint script
# For services with entrypoints (ponder, webapp, txnbot): pass the actual entrypoint
# For landing (no entrypoint): defaults to entrypoint-common.sh which is just helpers
ARG ENTRYPOINT_SCRIPT=containers/entrypoint-common.sh
COPY ${ENTRYPOINT_SCRIPT} /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Set working directory to service
WORKDIR /app/${SERVICE_DIR}
ARG NODE_ENV=production
ENV NODE_ENV=${NODE_ENV}
ENV HOST=0.0.0.0
ARG SERVICE_PORT=8080
ENV PORT=${SERVICE_PORT}
EXPOSE ${SERVICE_PORT}
# HEALTHCHECK flags don't expand ARGs (Docker limitation), so values are hardcoded.
# PORT is an ENV (works in CMD at runtime). PATH is baked via ARG→ENV.
ARG HEALTHCHECK_PATH=/
ENV HEALTHCHECK_PATH=${HEALTHCHECK_PATH}
HEALTHCHECK --interval=5s --timeout=3s --retries=12 --start-period=20s \
CMD wget --spider -q http://127.0.0.1:${PORT}${HEALTHCHECK_PATH} || exit 1
ENTRYPOINT ["dumb-init", "--", "/entrypoint.sh"]
# Force rebuild after cache prune — 2026-02-19T21:31:36Z

View file

@ -1,158 +0,0 @@
# ARCHITECTURE.md — System Map
Compressed overview for AI agents. Read this first, drill into source for details.
## Contract Architecture
```
Kraiken.sol (ERC-20 token)
├── liquidityManager: address (set once, immutable after)
│ └── LiquidityManager.sol (ThreePositionStrategy)
│ ├── optimizer: Optimizer (private immutable ref)
│ ├── pool: IUniswapV3Pool
│ ├── kraiken: Kraiken
│ └── Positions: Floor, Anchor, Discovery
├── stakingPool: address
│ └── Stake.sol
│ ├── Staking positions with tax rates
│ ├── Snatch mechanics (competitive staking)
│ └── getPercentageStaked(), getAverageTaxRate()
└── feeDestination: address (protocol revenue — both WETH and KRK fees go HERE, not back to holders)
Optimizer.sol (UUPS Upgradeable Proxy)
├── Reads: stake.getPercentageStaked(), stake.getAverageTaxRate()
├── Computes: sentiment → 4 liquidity params
├── Versions: Optimizer, OptimizerV2, OptimizerV3, OptimizerV3Push3
└── Admin: single address, set at initialize()
```
## Key Relationships
- **Kraiken → LiquidityManager**: set once via `setLiquidityManager()`, reverts if already set
- **LiquidityManager → Optimizer**: `private immutable` — baked into constructor, never changes
- **LiquidityManager → Kraiken**: exclusive minting/burning rights
- **Optimizer → Stake**: reads sentiment data (% staked, avg tax rate)
- **Optimizer upgrades**: UUPS proxy, admin-only `_authorizeUpgrade()`
- **feeDestination receives both WETH and KRK fees**: during `recenter()`, Uniswap V3 fee collection produces both tokens. WETH fees AND KRK fees are forwarded to `feeDestination` (see `LiquidityManager._scrapePositions()`).
- **feeDestination is a conditional-lock (not set-once)**: `setFeeDestination()` (deployer-only) allows repeated changes while the destination is an EOA, enabling staged deployment and testing. The moment a contract address is set, `feeDestinationLocked` is set to `true` and no further changes are allowed. A CREATE2 guard also blocks re-assignment if the current destination has since acquired bytecode. This differs from Kraiken's `liquidityManager`/`stakingPool` which are strictly set-once.
- **feeDestination KRK excluded from outstanding supply**: `_getOutstandingSupply()` subtracts `kraiken.balanceOf(feeDestination)` before computing scarcity, because protocol-held KRK cannot be sold into the floor and should not inflate the supply count. This subtraction only occurs when `feeDestination != address(0) && feeDestination != address(this)` (see `LiquidityManager.sol:324`); when feeDestination is unset or is LM itself the balance is not subtracted.
- **Staking pool KRK excluded from outstanding supply**: `_getOutstandingSupply()` also subtracts `kraiken.balanceOf(stakingPoolAddr)`, because staked KRK is locked and similarly cannot be sold into the floor. This subtraction only occurs when `stakingPoolAddr != address(0)` (see `LiquidityManager._getOutstandingSupply()`); when the staking pool is unset the balance is not subtracted.
## Three-Position Strategy
All managed by LiquidityManager via ThreePositionStrategy abstract:
| Position | Purpose | Behavior |
|----------|---------|----------|
| **Floor** | Safety net | Deep liquidity at VWAP-adjusted prices |
| **Anchor** | Price discovery | Near current price, width set by Optimizer |
| **Discovery** | Fee capture | Borders anchor, ~3x price range (11000 tick spacing) |
**Recenter** = atomic repositioning of all three positions. Triggered by anyone, automated by txnBot.
**Recenter constraints** (enforced on-chain):
- **60-second cooldown**: `MIN_RECENTER_INTERVAL = 60` (`LiquidityManager.sol:61`). A second recenter cannot succeed until at least 60 seconds have elapsed since the last one.
- **300-second TWAP window**: `PRICE_STABILITY_INTERVAL = 300` (`PriceOracle.sol:14`). `recenter()` validates the current tick against a 5-minute TWAP average (±`MAX_TICK_DEVIATION = 50` ticks). The pool must have at least 300 seconds of observation history; a fallback to a 60 000-second window is used if recent data are unavailable.
## Optimizer Parameters
`getLiquidityParams()` returns 4 values:
1. `capitalInefficiency` (0 to 1e18) — capital buffer level
2. `anchorShare` (0 to 1e18) — % allocated to anchor position
3. `anchorWidth` (ticks) — width of anchor position
4. `discoveryDepth` (0 to 1e18) — depth of discovery position
Sentiment calculation: `sentiment = f(averageTaxRate, percentageStaked)`
- High sentiment (bull) → wider discovery, more fee revenue for protocol treasury
- Holder value comes from asymmetric slippage (structural ETH accumulation), NOT from fee reinvestment
- Low sentiment (bear) → tight around floor, maximum protection
## Push3 Seed Pool
The evolutionary optimizer runs from `tools/push3-evolution/`. Active seeds are tracked in `tools/push3-evolution/seeds/manifest.jsonl` — one JSON object per line (JSONL format).
### `manifest.jsonl` field reference
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `file` | string | ✓ | Filename relative to `seeds/` (e.g. `optimizer_v3.push3`) |
| `origin` | `"hand-written"` \| `"evolved"` \| `"llm"` | ✓ | How the seed was produced |
| `date` | string (`YYYY-MM-DD`) | ✓ | ISO 8601 date the entry was added to the manifest |
| `fitness` | integer \| null | — | Raw fitness score (wei-scale integer). `null` when the seed has not yet been evaluated or the score has been invalidated |
| `fitness_flags` | string \| null | — | Comma-separated flags that qualify or invalidate the fitness value (e.g. `token_value_inflation,processExecIf_fix`). `null` when no flags apply |
| `run` | string \| null | — | Zero-padded run identifier from which the seed was admitted (e.g. `"007"`). `null` for `hand-written` and `llm` seeds |
| `generation` | integer \| null | — | Generation index within the run at which this candidate was produced. `null` for `hand-written` and `llm` seeds |
| `note` | string \| null | — | Human-readable description of the seed strategy or noteworthy behaviour |
The full machine-readable definition is in `tools/push3-evolution/seeds/manifest.schema.json` (JSON Schema draft 2020-12). `additionalProperties` is `false` — unknown fields are rejected. Only `file`, `origin`, and `date` are required; all other fields are optional but must match the types above when present.
## Stack
### On-chain
- Solidity, Foundry toolchain
- Uniswap V3 for liquidity positions
- OpenZeppelin for UUPS proxy, Initializable
- Base L2 (deployment target)
### Indexer
- **Ponder** (`services/ponder/`) — indexes on-chain events
- Schema: `services/ponder/ponder.schema.ts`
- Stats table with 168-slot ring buffer (7d × 24h × 4 segments)
- Ring buffer segments: [ethReserve, minted, burned, tax] (slot 3 being changed to holderCount)
- GraphQL API at port 42069
### Landing Page
- Vue 3 + Vite (`landing/`)
- `@wagmi/vue` for wallet connection (WalletButton, WalletCard)
- `@tanstack/vue-query` — required peer dep for `@wagmi/vue`; provides TanStack Query context for Wagmi's reactive hooks
- `@harb/web3` shared composables (`useAccount`, `useConnect`, `useDisconnect`, `useTokenBalance`)
- Three variants: HomeView (default), HomeViewOffensive (degens), HomeViewMixed
- Docs section: HowItWorks, Tokenomics, Staking, LiquidityManagement, AIAgent, FAQ
- LiveStats component polls Ponder GraphQL every 30s
### Staking Web App
- Vue 3 (`web-app/`)
- Password-protected (multiple passwords in LoginView.vue)
- ProtocolStatsCard shows real-time protocol metrics
### Infrastructure
- Docker Compose on 8GB VPS
- Woodpecker CI at ci.niovi.voyage
- Codeberg repo: johba/harb (private)
- Container registry: registry.niovi.voyage
## Directory Map
```
harb/
├── onchain/ # Solidity contracts + Foundry
│ ├── src/ # Contract source
│ ├── test/ # Forge tests
│ └── foundry.toml # via_ir = true required
├── services/
│ ├── ponder/ # Indexer service
│ │ ├── ponder.schema.ts
│ │ ├── src/
│ │ │ ├── helpers/stats.ts # Ring buffer logic
│ │ │ ├── lm.ts # LiquidityManager indexing
│ │ │ └── stake.ts # Stake indexing
│ └── txnBot/ # Automation bot: calls recenter() and payTax() on profitable opportunities
├── landing/ # Landing page (Vue 3)
│ ├── src/
│ │ ├── components/ # LiveStats, KFooter, WalletCard, etc.
│ │ ├── views/ # HomeView variants, docs pages
│ │ └── router/
├── web-app/ # Staking app (Vue 3)
│ ├── src/
│ │ ├── components/ # ProtocolStatsCard, etc.
│ │ └── views/ # LoginView, StakeView, etc.
├── kraiken-lib/ # Shared TypeScript helpers (bigint math, ABIs, encoding) for frontend and indexer
│ └── src/ # abis, format, ids, position, snatch, staking, subgraph, taxRates, version
├── containers/ # Docker configs, entrypoints
├── tools/ # Developer utilities
│ ├── push3-transpiler/ # Compiles Push3 programs to Solidity Optimizer
│ ├── push3-evolution/ # Evolutionary optimizer: fitness, mutation, crossover, seed generation
│ └── deploy-optimizer.sh # Script to deploy a new Optimizer version
├── docs/ # This file, PRODUCT-TRUTH.md
└── .woodpecker/ # CI pipeline configs
```

View file

@ -1,219 +0,0 @@
# ENVIRONMENT.md — Local Dev Stack
How to start, stop, and verify the harb development environment.
## Stack Overview
Docker Compose services (in startup order):
| Service | Purpose | Port | Health Check |
|---------|---------|------|-------------|
| **anvil** | Local Ethereum fork (Base Sepolia by default; override with `FORK_URL`) | 8545 | JSON-RPC response |
| **postgres** | Ponder database | 5432 | pg_isready |
| **bootstrap** | Deploys contracts to anvil | — | One-shot, exits 0 |
| **ponder** | On-chain indexer + GraphQL API | 42069 | HTTP /ready or GraphQL |
| **landing** | Landing page (Vue 3 + Vite) | 5174 | HTTP response |
| **webapp** | Staking app (Vue 3) | 5173 | HTTP response |
| **txn-bot** | Automated `recenter()` and `payTax()` upkeep ([services/txnBot/](../services/txnBot/)) | 43069 | Process alive |
| **caddy** | Reverse proxy / TLS | 80/443 | — |
| **umami** | Self-hosted analytics (Umami) | 3000 | HTTP /api/heartbeat |
| **otterscan** | Block explorer | 5100 | — |
## txnBot Service
`services/txnBot/` is the automation service responsible for keeping the protocol healthy:
- **`recenter()` monitoring** — polls Ponder GraphQL metrics and submits `recenter()` transactions to the LiquidityManager when price drift requires repositioning.
- **`payTax()` tracking** — monitors staking positions for overdue taxes and calls `payTax()` when it is profitable to do so.
- **Status endpoint** — exposes `GET /status` (port 43069) for operational health checks.
txnBot starts in the third phase of the dev stack (after ponder) alongside webapp and landing. See [services/txnBot/AGENTS.md](../services/txnBot/AGENTS.md) for configuration, safety checklist, and debugging guidance.
## Network Contexts
Two network contexts are relevant: the dev-stack Anvil (docker-compose) and the backtesting tools that require Base mainnet.
### Dev stack Anvil (docker-compose)
The `anvil` service in `docker-compose.yml` runs `containers/anvil-entrypoint.sh`, which forks:
```
${FORK_URL:-https://sepolia.base.org}
```
**Default: Base Sepolia.** The `bootstrap` service deploys all KRAIKEN protocol contracts (Kraiken, Stake, Optimizer, LiquidityManager) and creates a new KRK/WETH pool using the existing Uniswap V3 Factory already present on the forked network. Addresses are written to `tmp/containers/contracts.env`.
To fork Base mainnet instead (required for red-team / backtesting — see below):
```bash
FORK_URL=https://mainnet.base.org docker compose up -d
```
### Backtesting / red-team (`scripts/harb-evaluator/red-team.sh`)
`red-team.sh` boots the docker-compose stack and then calls protocol operations using **Base mainnet** addresses for the Uniswap V3 periphery (V3_FACTORY, SwapRouter02, NonfungiblePositionManager). These addresses are only valid on a mainnet fork.
`red-team.sh` calls `sudo docker compose up -d` internally. The script uses `sudo -E` so that `FORK_URL` is preserved across the sudo boundary:
```bash
FORK_URL=https://mainnet.base.org bash scripts/harb-evaluator/red-team.sh
```
### FitnessEvaluator (`onchain/test/FitnessEvaluator.t.sol`)
`FitnessEvaluator.t.sol` does **not** use Anvil. It uses Foundry's native revm backend (`vm.createSelectFork`) to fork Base mainnet in-process — no docker-compose dependency:
```bash
BASE_RPC_URL=https://mainnet.base.org \
FITNESS_MANIFEST_DIR=/tmp/manifest \
forge test --match-contract FitnessEvaluator --match-test testBatchEvaluate -vv
```
## Quick Start
```bash
cd /home/debian/harb
# Start everything
docker compose up -d
# Wait for bootstrap (deploys contracts, ~60-90s)
docker compose logs -f bootstrap
# Check all healthy
docker compose ps
```
## Verify Stack Health
```bash
# Anvil (local chain)
curl -s http://localhost:8545 -X POST -H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' | jq .result
# Ponder (indexer + GraphQL)
curl -s http://localhost:42069/graphql -X POST \
-H 'Content-Type: application/json' \
-d '{"query":"{ stats { id } }"}' | jq .
# Landing page
curl -sf http://localhost:5174 | head -5
# Staking app
curl -sf http://localhost:5173 | head -5
```
## Container Network
Services communicate on `harb-network` Docker bridge.
Internal hostnames match service names (e.g., `ponder:42069`).
Landing page container IP (for Playwright testing): check with
```bash
docker inspect landing --format '{{.NetworkSettings.Networks.harb_harb-network.IPAddress}}'
```
## Common URLs (for testing/review)
- **Landing:** `http://172.18.0.6:5174` (container IP) or `http://localhost:5174`
- **Staking app:** `http://localhost:5173/app/`
- **Ponder GraphQL:** `http://localhost:42069/graphql`
- **Anvil RPC:** `http://localhost:8545`
- **txnBot status:** `http://localhost:43069/status`
- **Umami analytics:** `http://localhost:3000` (default login: `admin` / `umami`)
## Resource Notes
- 8GB VPS — running full stack uses ~4-5GB RAM
- npm install inside containers can OOM with all services running
- Landing container takes ~2min to restart (npm install + vite startup)
- 4GB swap is essential for CI + stack concurrency
## Staking App Passwords
For testing login: `lobsterDao`, `test123`, `lobster-x010syqe?412!`
(defined in `web-app/src/views/LoginView.vue`)
## Webapp Environment Variables
| Variable | Default | Set in docker-compose | Purpose |
|---|---|---|---|
| `VITE_ENABLE_LOCAL_SWAP` | `false` (unset) | `true` | Show inline ETH→$KRK swap widget on Get KRK page instead of the Uniswap link. Enable for local dev; leave unset for production builds. |
| `VITE_KRAIKEN_ADDRESS` | from `deployments-local.json` | via `contracts.env` + entrypoint | Override KRK token address. |
| `VITE_STAKE_ADDRESS` | from `deployments-local.json` | via `contracts.env` + entrypoint | Override Stake contract address. |
| `VITE_DEFAULT_CHAIN_ID` | auto-detected (31337 on localhost) | — | Force the default chain. |
| `VITE_UMAMI_URL` | unset | via env | Full URL to Umami `script.js` (e.g. `https://analytics.kraiken.org/script.js`). Omit to disable analytics. |
| `VITE_UMAMI_WEBSITE_ID` | unset | via env | Umami website ID (UUID). Required alongside `VITE_UMAMI_URL`. |
## Analytics (Umami)
Self-hosted [Umami](https://umami.is/) provides privacy-respecting funnel analytics with no third-party tracking. The `umami` Docker service shares the `postgres` instance (separate `umami` database created by `containers/init-umami-db.sh`).
### Setup
1. Start the stack — Umami comes up automatically.
2. Open `http://localhost:3000` and log in (default: `admin` / `umami`). Change the password on first login.
3. Add a website in Umami and copy the **Website ID** (UUID).
4. Set the env vars before starting landing/webapp:
```bash
export VITE_UMAMI_URL=http://localhost:3000/script.js
export VITE_UMAMI_WEBSITE_ID=<your-website-id>
```
For staging/production behind Caddy, use the `/analytics/script.js` path instead.
### Tracked funnel events
| Event | App | Trigger |
|-------|-----|---------|
| `cta_click` | landing | User clicks a CTA button (label in event data) |
| `wallet_connect` | web-app | Wallet connected for the first time |
| `swap_initiated` | web-app | User submits a buy or sell swap (direction in event data) |
| `stake_created` | web-app | Stake position successfully created |
Page views are tracked automatically by the Umami script on every route change.
### Production deployment
On `harb-staging`, set `VITE_UMAMI_URL` and `VITE_UMAMI_WEBSITE_ID` in the environment and configure `UMAMI_APP_SECRET` to a strong random value. The Caddy route `/analytics*` proxies to the Umami container.
## Contract Addresses
After bootstrap, addresses are written to `/home/debian/harb/tmp/containers/contracts.env` with the following variable names (no `VITE_` prefix):
```
LIQUIDITY_MANAGER=0x...
KRAIKEN=0x...
STAKE=0x...
```
The entrypoint scripts read this file and re-export the addresses with `VITE_` prefixes for Vite builds:
- `containers/landing-entrypoint.sh` exports `VITE_KRAIKEN_ADDRESS` and `VITE_STAKE_ADDRESS`
- `containers/webapp-entrypoint.sh` exports `VITE_KRAIKEN_ADDRESS` and `VITE_STAKE_ADDRESS`
## E2E Test Environment Variables
The Playwright test setup (`tests/setup/stack.ts`) reads stack coordinates from env vars, falling back to `onchain/deployments-local.json` when they are absent.
| Variable | Purpose |
|---|---|
| `STACK_RPC_URL` | RPC endpoint (default: `http://localhost:8081/api/rpc`) |
| `STACK_WEBAPP_URL` | Web app base URL (default: `http://localhost:8081`) |
| `STACK_GRAPHQL_URL` | GraphQL endpoint (default: `http://localhost:8081/api/graphql`) |
| `STACK_KRAIKEN_ADDRESS` | Kraiken contract address (overrides deployments-local.json) |
| `STACK_STAKE_ADDRESS` | Stake contract address (overrides deployments-local.json) |
| `STACK_LM_ADDRESS` | LiquidityManager contract address (overrides deployments-local.json) |
| `STACK_OPTIMIZER_PROXY_ADDRESS` | OptimizerProxy address (optional; enables optimizer integration tests) |
When all three of `STACK_KRAIKEN_ADDRESS`, `STACK_STAKE_ADDRESS`, and `STACK_LM_ADDRESS` are set, the deployments file is not read at all, which allows tests to run in containerised environments that have no local checkout.
## Playwright Testing
```bash
# Chromium path
/home/debian/.cache/ms-playwright/chromium-1209/chrome-linux64/chrome
# Run against landing (block fonts for speed)
NODE_PATH=$(npm root -g) node test-script.cjs
```
See `tmp/user-test-r4.cjs` for the most recent test script pattern.

View file

@ -1,134 +0,0 @@
# PRODUCT-TRUTH.md — What We Can and Cannot Claim
This file is the source of truth for all product messaging, docs, and marketing.
If a claim isn't here or contradicts what's here, it's wrong. Update this file
when the protocol changes — not the marketing copy.
**Last updated:** 2026-02-22
**Updated by:** Johann + Clawy after user test review session
---
## Target Audience
- **Crypto natives** who know DeFi but don't know KrAIken
- NOT beginners. NOT "new to DeFi" users.
- Think: people who've used Uniswap, understand liquidity, know what a floor price means
## The Floor
✅ **Can say:**
- Every KRK token has a minimum redemption price backed by real ETH
- The floor is enforced by immutable smart contracts
- The floor is backed by actual ETH reserves, not promises
- No rug pulls — liquidity is locked in contracts
- "Programmatic guarantee" (borrowed from Baseline — accurate for us too)
❌ **Cannot say:**
- "The floor can never decrease" — **FALSE.** Selling withdraws ETH from reserves. The floor CAN decrease.
- "Guaranteed profit" or "risk-free" — staking is leveraged exposure, it has real downside
- "Floor always goes up" — **FALSE.** The floor rises from asymmetric slippage during balanced trading, but heavy sell pressure CAN push it down. Fees do NOT feed back to the floor (they go to protocol treasury).
## The Optimizer
✅ **Can say:**
- Reads staker sentiment (% staked, average tax rate) to calculate parameters
- Returns 4 parameters: capitalInefficiency, anchorShare, anchorWidth, discoveryDepth
- Runs autonomously on-chain — no human triggers needed for parameter reads
- Is a UUPS upgradeable proxy — can be upgraded to new versions
- Currently admin-upgradeable (single admin key set at initialization)
- Multiple versions exist: Optimizer, OptimizerV2, OptimizerV3, OptimizerV3Push3
- "The optimizer evolves" — true in the sense that new versions get deployed
❌ **Cannot say:**
- "No admin keys" — **FALSE.** UUPS upgrade requires admin. Admin key exists.
- "No proxy patterns" — **FALSE.** It IS a UUPS proxy.
- "Stakers vote for new optimizers" — **NOT YET.** This is roadmap, not current state.
- "Simply evolves" / "evolves without upgrades" — misleading. It's an explicit upgrade via proxy.
- "Three strategies" — **FALSE.** It's ONE strategy with THREE positions (Floor, Anchor, Discovery).
- "AI learns from the market" — overstated. The optimizer reads staking sentiment, not market data directly.
🔮 **Roadmap (can say "planned" / "coming"):**
- Staker governance for optimizer upgrades (vote with stake weight)
- On-chain training data → new optimizer contracts via Push3 transpiler
- Remove admin key in favor of staker voting
- Adversarial backtesting: replay red-team attack sequences against optimizer candidates (#536)
- Push3 optimizer evolution: mutate, score against attacks, select survivors (#537)
- Unified Push3 → deploy pipeline: transpile, compile, UUPS upgrade in one command (#538)
## Fee Destination
✅ **Can say:**
- Trading fees are collected by the LiquidityManager during recenters
- Fees are sent to `feeDestination` (protocol treasury / founders)
- Fee revenue is the protocol's business model
- **Both WETH and KRK fees** from Uniswap V3 positions are forwarded to `feeDestination` — not just ETH/WETH
- KRK held at `feeDestination` is excluded from the outstanding supply calculation *only when* `feeDestination != address(0) && feeDestination != address(this)` — because protocol-held KRK cannot be sold into the floor and should not inflate the scarcity metric
- KRK held in the staking pool is also excluded from the outstanding supply calculation *only when* `stakingPoolAddr != address(0)` — staked KRK is locked and cannot be sold into the floor
❌ **Cannot say:**
- "Fees grow your KRK value" — **FALSE.** Fees go to treasury, not back to holders.
- "Auto-compounding" — **FALSE.** Nothing is reinvested for holders.
- "Fee accumulation benefits holders" — **FALSE.** Holders benefit from asymmetric slippage, not fees.
⚠️ **What actually grows holder value:**
The three-position structure creates **asymmetric slippage** — buys push the price up more than sells push it down. With balanced trading activity, ETH accumulates in the system structurally, raising the effective price of KRK over time. This is a property of the liquidity layout, not fee reinvestment.
## Liquidity Positions
✅ **Can say:**
- Three positions: Floor, Anchor, Discovery
- Floor: deep liquidity at VWAP-adjusted prices (safety net)
- Anchor: near current price, fast price discovery (width set by Optimizer)
- Discovery: borders anchor, wide range (~3x current price)
- The optimizer adjusts position parameters based on sentiment
- "Recenter" = atomic repositioning of all liquidity in one transaction
- Anyone can trigger a recenter; the protocol bot does it automatically
- Recenter has a **60-second cooldown** (`MIN_RECENTER_INTERVAL = 60` in `LiquidityManager.sol`) — successive recenters are rate-limited on-chain
- Recenter requires **300 seconds of TWAP oracle history** (`PRICE_STABILITY_INTERVAL = 300` in `PriceOracle.sol`) and validates the current tick is within ±50 ticks of the 5-minute average before proceeding
- The three positions together create asymmetric slippage — buys have more price impact upward than sells have downward
- With normal trading activity, this structural asymmetry accumulates ETH, raising the floor over time
❌ **Cannot say:**
- "Three trading strategies" — it's three positions in ONE strategy
- "Token-owned liquidity" — ⚠️ USE CAREFULLY. KRK doesn't "own" anything in the legal/contract sense. The LiquidityManager manages positions. Acceptable as metaphor in marketing, not in technical docs.
- "Captures fees for holders" — fees go to feeDestination, not holders. The positions capture fees for the PROTOCOL.
## Staking
✅ **Can say:**
- Staking = leveraged directional exposure
- Stakers set tax rates; positions can be "snatched" by others willing to pay higher tax
- Tax rates influence optimizer sentiment → bull/bear positioning
- "Stakers profit when the community grows" (via supply expansion + leverage)
- Staking is optional — most holders just hold
❌ **Cannot say:**
- "Start Earning" / "Earn yield" / "APY" — staking is NOT yield farming
- "Guaranteed returns" — leveraged positions amplify losses too
- "Passive income" — tax payments are a cost, not income
## Supply Mechanics
✅ **Can say:**
- Elastic supply: buy = mint, sell = burn
- Protocol controls minting exclusively through LiquidityManager
- LiquidityManager address is set once on Kraiken contract and cannot be changed
## Code / Open Source
✅ **Can say:**
- Smart contracts are verifiable on Basescan
- Key contracts are viewable on the docs/code page
- "Full source will be published at mainnet launch" (if that's the plan)
❌ **Cannot say:**
- "Open source" — the Codeberg repo is **private**. This is currently false.
- "Audited" — unless an audit has been completed
## General Rules
1. When in doubt, understate. "The floor is backed by ETH" > "The floor guarantees you'll never lose money"
2. Separate current state from roadmap. Always.
3. Technical docs: be precise. Marketing: metaphors OK but never contradict technical reality.
4. If you're not sure a claim is true, check this file. If it's not here, verify against contract source before writing it.

View file

@ -1,28 +0,0 @@
# KrAIken (Harb)
**A token system where your tokens earn for you — backed by real ETH, governed by transparent on-chain rules.**
## What is it?
KRK is a token on Base (Ethereum L2). When you hold KRK tokens, they're backed by ETH in a trading vault — there's a built-in minimum value your tokens can't drop below.
You can **stake** your tokens to earn a share of every trade. The longer you stake, the more you accumulate. But there's a twist: someone else can **challenge** your position by committing to a higher earning rate. If that happens, you get compensated at market value — you never lose money, you just get bought out.
The system adjusts itself automatically based on how people are staking. No manual intervention, no hidden operators. Everything is on-chain and verifiable.
## Quick Links
- [How It Works](./how-it-works.md) — The mechanics explained simply
- [Getting Started](./getting-started.md) — Buy, stake, earn in 5 minutes
- [Technical Deep Dive](./technical/) — Architecture, contracts, development
## Key Numbers
- **20,000 staking positions** available (20% of total supply)
- **30 earning rate tiers** from 1% to 97% yearly
- **3-day minimum hold** before a position can be challenged
- **ETH-backed floor price** — your tokens always have a minimum value
## Is it safe?
The contracts are **not yet audited**. The code is [open source](https://codeberg.org/johba/harb) and deployed on Base. Use at your own risk, and never invest more than you can afford to lose.

View file

@ -1,89 +0,0 @@
# UX-DECISIONS.md — Design & Messaging Decisions
Living record of UX/messaging decisions. Agents must follow these.
Updated after each review session.
**Last updated:** 2026-02-22
---
## Audience
- **Primary:** Crypto natives who understand DeFi but don't know KrAIken
- **Not targeting:** Beginners, "new to DeFi" users
- No ELI5 content on the landing page
- Beginners can find their way through the docs if curious
## Landing Page Variants
| Variant | File | Target | Tone |
|---------|------|--------|------|
| Default | `HomeView.vue` | General crypto users | Clean, professional |
| Offensive | `HomeViewOffensive.vue` | Degens, technical users | Aggressive, direct |
| Mixed | `HomeViewMixed.vue` | Blend | Balanced |
**Offensive is strongest** for crypto natives. Default wins for broader appeal.
## Messaging Rules
### Do Say
- "Stake & Grow" (not "Start Earning")
- "Floor guaranteed" (the mechanism exists, even if floor can decrease)
- "Token-owned liquidity" (metaphor, acceptable in marketing)
- "No admin keys" ONLY when staker governance is implemented
- "How It Works →" as equal-weight CTA alongside "Get $KRK"
### Staking Visibility
- **Staking is NOT mentioned on the landing page.** The staking app is password-protected for a reason — it's for insiders, not casual visitors.
- Landing page sells the token and protocol. The CTA is "Get $KRK", not "Stake".
- No staking mechanics, staking CTAs, or staking explainers on any HomeView variant.
- Visitors discover staking through the community, not through the landing page.
### Don't Say
- "Start Earning" / "Earn yield" / "APY" — staking isn't yield
- "Stake" / "Stake & Grow" / "Staking" on the landing page — see above
- "You just hold and win" — too promissory (changed to "You just hold.")
- "Open source" — repo is private
- "Three strategies" — three positions, one strategy
- Raw holder count when it's low (show growth % instead)
### Staking Value Prop
The one-liner: **"Stake → invite friends → supply grows → you profit with leverage."**
Staking is leveraged directional exposure that pays off when the community grows.
It is NOT passive income. It is NOT yield farming.
## Display Rules
### Numbers
- Show **USD as primary** for all ETH amounts (people can't relate to 0.0000029 ETH)
- Keep ETH as secondary/tooltip for crypto natives
- Use CoinGecko API for ETH/USD, cache 5 min
- Format: ≥$1000 → "$25.4k", ≥$1 → "$2.50", <$1 → "$0.007"
- Never use `toFixed(4)` on tiny ETH values — use dynamic precision
### Stats
- Show growth trends (sparklines, ↑X%) not raw counts
- Multiple mini-sparklines per stat card, not a single health graph
- 7 days or since launch (whichever shorter) for historical data
- Ring buffer only — no unbounded snapshot tables (disk growth concern)
- Live indicator (green dot + "Updated Xs ago") for freshness
### Contract Addresses
- Show in footer with copy buttons
- Link to Basescan for verification
## Docs
- "How It Works" should be equal-weight navigation item, not buried
- Recenter explainer: keep it to 3 bullets (atomic, sentiment-driven, bull/bear)
- No Baseline comparison table — borrow their language, don't name-drop them
- Code page (`/docs/code`) for contract source viewing instead of Codeberg link
## Tone
- Sharp, direct, no fluff
- Technical confidence without overclaiming
- "We" when referring to the protocol community
- Never condescending
- Dark theme, minimal decoration

View file

@ -1,73 +0,0 @@
# Woodpecker CI
## Infrastructure
- **Server**: Woodpecker 3.10.0 runs as a **systemd service** (`woodpecker-server.service`), NOT a Docker container. Binary at `/usr/local/bin/woodpecker-server`.
- **Host**: `https://ci.niovi.voyage` (port 8000 locally at `http://127.0.0.1:8000`)
- **Forge**: Codeberg (Gitea-compatible) — repo `johba/harb`, forge remote ID `800173`
- **Database**: PostgreSQL at `127.0.0.1:5432`, database `woodpecker`, user `woodpecker`
- **Config**: `/etc/woodpecker/server.env` (contains secrets — agent secret, Gitea OAuth secret, DB credentials)
- **CLI**: Downloaded to `/tmp/woodpecker-cli` (v3.10.0). Requires `WOODPECKER_SERVER` and `WOODPECKER_TOKEN` env vars.
- **Logs**: `journalctl -u woodpecker-server -f` (NOT `docker logs`)
## Pipeline Configs
- `.woodpecker/build-ci-images.yml` — Builds Docker CI images using unified `docker/Dockerfile.service-ci`. Triggers on **push** to `master` or `feature/ci` when files in `docker/`, `.woodpecker/`, `containers/`, `kraiken-lib/`, `onchain/`, `services/`, `web-app/`, or `landing/` change.
- `.woodpecker/e2e.yml` — Runs Playwright E2E tests. Bootstrap step sources `scripts/bootstrap-common.sh` for shared deploy/seed logic. Health checks use `scripts/wait-for-service.sh`. Triggers on **pull_request** to `master`.
- Pipeline numbering: even = build-ci-images (push events), odd = E2E (pull_request events). This is not guaranteed but was the observed pattern.
## Monitoring Pipelines via DB
Since the Woodpecker API requires authentication (tokens are cached in server memory; DB-only token changes don't work without a server restart), monitor pipelines directly via PostgreSQL:
```bash
# Latest pipelines
PGPASSWORD='<db_password>' psql -h 127.0.0.1 -U woodpecker -d woodpecker -c \
"SELECT number, status, branch, event, commit FROM pipelines
WHERE repo_id = (SELECT id FROM repos WHERE full_name = 'johba/harb')
ORDER BY number DESC LIMIT 5;"
# Step details for a specific pipeline
PGPASSWORD='<db_password>' psql -h 127.0.0.1 -U woodpecker -d woodpecker -c \
"SELECT s.name, s.state,
CASE WHEN s.finished > 0 AND s.started > 0 THEN (s.finished - s.started)::int::text || 's'
ELSE '-' END as duration, s.exit_code
FROM steps s WHERE s.pipeline_id = (
SELECT id FROM pipelines WHERE number = <N>
AND repo_id = (SELECT id FROM repos WHERE full_name = 'johba/harb'))
ORDER BY s.started NULLS LAST;"
```
## Triggering Pipelines
- **Normal flow**: Push to Codeberg → Codeberg fires webhook to `https://ci.niovi.voyage/api/hook` → Woodpecker creates pipeline.
- **Known issue**: Codeberg webhooks can stop firing if `ci.niovi.voyage` becomes unreachable (DNS/connectivity). Check Codeberg repo settings → Webhooks to verify delivery history and re-trigger.
- **Manual trigger via API** (requires valid token — see known issues):
```bash
WOODPECKER_SERVER=http://127.0.0.1:8000 WOODPECKER_TOKEN=<token> \
/tmp/woodpecker-cli pipeline create --branch feature/ci johba/harb
```
- **API auth limitation**: The server caches user token hashes in memory. Inserting a token directly into the DB does not work without restarting the server (`sudo systemctl restart woodpecker-server`).
## CI Docker Images
- `docker/Dockerfile.service-ci` — Unified parameterized Dockerfile for all service CI images (ponder, webapp, landing, txnBot). Uses `--build-arg` for service-specific configuration (SERVICE_DIR, SERVICE_PORT, ENTRYPOINT_SCRIPT, NEEDS_SYMLINKS, etc.).
- **sync-tax-rates**: Builder stage runs `scripts/sync-tax-rates.mjs` to sync tax rates from `Stake.sol` into kraiken-lib before TypeScript compilation.
- **Symlinks fix** (webapp only, `NEEDS_SYMLINKS=true`): Creates `/web-app`, `/kraiken-lib`, `/onchain` symlinks to work around Vite's `removeBase()` stripping `/app/` prefix from filesystem paths.
- **CI env detection** (`CI=true`): Disables Vue DevTools plugin in `vite.config.ts` to prevent 500 errors caused by path resolution issues with `/app/` base path.
- **HEALTHCHECK**: Configurable via build args; webapp uses `--retries=84 --interval=5s` = 420s (7 min), aligned with `wait-for-stack` step timeout.
- **Shared entrypoints**: Each service uses a unified entrypoint script (`containers/<service>-entrypoint.sh`) that branches on `CI=true` env var for CI vs local dev paths. Common helpers in `containers/entrypoint-common.sh`.
- **Shared bootstrap**: `scripts/bootstrap-common.sh` contains shared contract deployment, seeding, and funding functions used by both `containers/bootstrap.sh` (local dev) and `.woodpecker/e2e.yml` (CI).
- CI images are tagged with git SHA and `latest`, pushed to a local registry.
## CI Agent & Registry Auth
- **Agent**: Runs as user `ci` (uid 1001) on `harb-staging`, same host as the dev environment. Binary at `/usr/local/bin/woodpecker-agent`.
- **Registry credentials**: The `ci` user must have Docker auth configured at `/home/ci/.docker/config.json` to pull private images from `registry.niovi.voyage`. If images fail to pull with "no basic auth credentials", fix with:
```bash
sudo mkdir -p /home/ci/.docker
sudo cp /home/debian/.docker/config.json /home/ci/.docker/config.json
sudo chown -R ci:ci /home/ci/.docker
sudo chmod 600 /home/ci/.docker/config.json
```
- **Shared Docker daemon**: The `ci` and `debian` users share the same Docker daemon. Running `docker system prune` as `debian` removes images cached for CI pipelines. If CI image pulls fail after a prune, either fix registry auth (above) or pre-pull images as `debian`: `docker pull registry.niovi.voyage/harb/ponder-ci:latest` etc.
## Debugging Tips
- If pipelines aren't being created after a push, check Codeberg webhook delivery logs first.
- The Woodpecker server needs `sudo` to restart. Without it, you cannot: refresh API tokens, clear cached state, or recover from webhook auth issues.
- E2E pipeline failures often come from `wait-for-stack` timing out. Check the webapp HEALTHCHECK alignment and Ponder indexing time.
- The `web-app/vite.config.ts` `allowedHosts` array must include container hostnames (`webapp`, `caddy`) for health checks to succeed inside Docker networks.
- **Never use `bash -lc`** in Woodpecker pipeline commands — login shell resets PATH via `/etc/profile`, losing Foundry and other tools set by Docker ENV. Use `bash -c` instead.

View file

@ -1,32 +0,0 @@
# Codeberg API Access
## Authentication
Codeberg API tokens are stored in `~/.netrc` (standard `curl --netrc` format, `chmod 600`):
```
machine codeberg.org
login johba
password <api-token>
```
The `password` field holds the API token — this is standard `.netrc` convention, not an actual password.
## Generating Tokens
Generate tokens at `https://codeberg.org/user/settings/applications`.
## Usage
Pass `--netrc` to curl for authenticated Codeberg API calls:
```bash
# List issues
curl --netrc -s https://codeberg.org/api/v1/repos/johba/harb/issues | jq '.[0].title'
# Get a specific issue
curl --netrc -s https://codeberg.org/api/v1/repos/johba/harb/issues/42 | jq '.title, .body'
# List pull requests
curl --netrc -s https://codeberg.org/api/v1/repos/johba/harb/pulls | jq '.[].title'
```
## Git vs API
The repo uses SSH for git push/pull (`ssh://git@codeberg.org`), so `.netrc` is only used for REST API interactions (issues, PRs, releases).
## Webhooks
Codeberg sends webhooks to `https://ci.niovi.voyage/api/hook` to trigger Woodpecker CI pipelines. If webhooks stop firing (e.g. DNS issues), check Codeberg repo settings → Webhooks to verify delivery history and re-trigger.

View file

@ -1,67 +0,0 @@
# Dev Environment
## Prerequisites
Docker Engine (Linux) or Colima (Mac). See `docs/docker.md` for installation.
## Quick Start
```bash
nohup ./scripts/dev.sh start & # start (takes ~3-6 min first time)
tail -f nohup.out # watch progress
./scripts/dev.sh health # verify all services healthy
./scripts/dev.sh stop # stop and clean up
```
Do not launch services individually — `dev.sh` enforces phased startup with health gates.
## Restart Modes
- `./scripts/dev.sh restart --light` — Fast (~10-20s): only webapp + txnbot, preserves Anvil/Ponder state. Use for frontend changes.
- `./scripts/dev.sh restart --full` — Full (~3-6min): redeploys contracts, fresh state. Use for contract changes.
## Environments
Supported: `BASE_SEPOLIA_LOCAL_FORK` (default Anvil fork), `BASE_SEPOLIA`, and `BASE`. Match contract addresses and RPCs accordingly.
## Ports
The stack uses these ports:
| Port | Service |
|-------|------------|
| 8545 | Anvil |
| 5173 | webapp |
| 5174 | landing |
| 42069 | Ponder |
| 43069 | txnBot |
| 5100 | Otterscan |
| 8081 | Caddy |
Check with `lsof -i :<port>` if startup fails.
## Docker Topology
- `docker-compose.yml` has NO `depends_on`. Service ordering is handled by `scripts/dev.sh`.
- Startup phases: anvil+postgres → bootstrap → ponder → webapp/landing/txn-bot → caddy → smoke test.
- Shared bootstrap: `scripts/bootstrap-common.sh` (sourced by both local dev and CI).
- 20GB disk limit enforced. `dev.sh stop` auto-prunes. Named volumes for `node_modules`.
- All services have log rotation (30MB max per container) and PostgreSQL WAL limits configured.
## kraiken-lib Build
- Run `./scripts/build-kraiken-lib.sh` before `docker-compose up` so containers mount a fresh `kraiken-lib/dist` from the host.
- `scripts/watch-kraiken-lib.sh` rebuilds on file changes (requires inotify-tools) and restarts dependent containers automatically.
- The dev script runs the build automatically on `start`, but manual rebuilds are needed if you change kraiken-lib while the stack is already running.
## Common Pitfalls
- **Docker disk full**: `dev.sh start` refuses to run if Docker disk usage exceeds 20GB. Fix: `./scripts/dev.sh stop` (auto-prunes) or `docker system prune -af --volumes`.
- **Stale Ponder state**: If Ponder fails with schema errors after contract changes, delete its state: `rm -rf services/ponder/.ponder/` then `./scripts/dev.sh restart --full`.
- **kraiken-lib out of date**: If services fail with import errors or missing exports, rebuild: `./scripts/build-kraiken-lib.sh`.
- **Container not found errors**: `dev.sh` expects Docker Compose v2 container names (`harb-anvil-1`, hyphens not underscores). Verify with `docker compose version`.
- **Port conflicts**: See Ports table above. Check with `lsof -i :<port>` if startup fails.
- **npm ci failures in containers**: Named Docker volumes cache `node_modules/`. If dependencies change and installs fail, remove the volume: `docker volume rm harb_webapp_node_modules` (or similar), then restart.
## Handy Commands
```bash
foundryup # update Foundry toolchain
anvil --fork-url https://sepolia.base.org # manual fork for diagnosing outside dev.sh
# inspect services while stack is running
curl http://localhost:8081/api/txn/status
curl -X POST http://localhost:8081/api/graphql \
-d '{"query":"{ stats(id:\"0x01\"){kraikenTotalSupply}}"}'
cast call <POOL> "slot0()" # inspect pool state
PONDER_NETWORK=BASE_SEPOLIA_LOCAL_FORK npm run dev # focused Ponder debugging (inside services/ponder/)
```

View file

@ -1,60 +0,0 @@
# Getting Started
## What You Need
1. A Web3 wallet (MetaMask, Coinbase Wallet, etc.)
2. Some ETH on **Base** network
3. 5 minutes
## Step 1: Get KRK Tokens
1. Go to the [KrAIken app](/app/get-krk)
2. Connect your wallet
3. Swap ETH for KRK on Uniswap
- Make sure you're on **Base** network
- Use the 1% fee tier pool
**Tip:** Start small. The protocol is unaudited — only use what you're comfortable risking.
## Step 2: Stake Your Tokens
1. Go to the [Staking Dashboard](/app/stake)
2. Connect your wallet (if not already connected)
3. Choose how many KRK tokens to stake
- Minimum stake is displayed in the form
4. Pick your earning rate (tax rate)
- Lower = cheaper to hold, but easier to challenge
- Higher = more expensive, but harder to challenge
- Start with a mid-range rate if you're unsure
5. Click **Stake** and confirm the transaction
## Step 3: Monitor Your Position
Once staked, you'll see your position in the **Active Positions** section:
- Your slot count and ownership percentage
- Current earning rate
- Accrued tax obligation
You can view detailed stats in your [Wallet Dashboard](/app/wallet/).
## Understanding the Numbers
- **Owner Slots**: Your share of the staking pool. 1,000 slots = 1% ownership.
- **Tax Rate**: What you pay yearly to hold your position. Paid when you unstake or manually.
- **Floor Tax**: The minimum rate needed to challenge existing positions.
- **Positions Buyout**: How many positions your rate would displace.
## Unstaking
To exit a position:
1. Find your position in the Active Positions list
2. Click to expand it
3. Choose to unstake (partially or fully)
4. You receive your staked tokens plus any earnings, minus tax owed
## Tips
- **Check the floor tax** before staking. If it's high, many positions are actively defended.
- **Watch the ETH reserve** on the landing page — growing reserve = healthy protocol.
- **Don't panic if challenged** — you get paid out at market value. You can always re-stake.
- **Join the community** — [Telegram](https://t.me/kraikenportal) for questions and discussion.

View file

@ -1,62 +0,0 @@
# How It Works
## The Basics
KRK tokens trade on Uniswap (Base network). Behind the scenes, a **trading vault** holds ETH that backs every KRK token. This creates a **floor price** — the absolute minimum value your tokens are worth.
## Earning by Staking
When you stake KRK tokens, you claim **owner slots** — a percentage of the protocol's staking pool. Every time someone buys KRK on the open market, new tokens are minted, and stakers get a proportional share. The more slots you hold, the more you earn.
### Choosing Your Rate
When you stake, you pick an **earning rate** (called a "tax rate" in the contracts). This is the yearly cost of holding your position:
| Rate Level | Yearly Cost | Trade-off |
|-----------|------------|-----------|
| Low (1-5%) | Cheap to hold | Easy for others to challenge |
| Medium (12-30%) | Moderate cost | Balanced protection |
| High (50%+) | Expensive to hold | Very hard to challenge |
**The key insight:** Your earning rate is also your protection level. A higher rate costs more, but makes it harder for anyone to take your position.
## Challenges (Snatching)
If someone wants your staking slots and is willing to pay a higher rate than you, they can **challenge** (snatch) your position:
1. The challenger stakes at a higher rate
2. Your position is automatically closed
3. You receive the **full market value** of your staked tokens — including any earnings
4. The challenger takes over your slots
**You never lose money in a challenge.** You get compensated at current market value. You just stop earning from those slots.
## The Trading Vault
The Liquidity Manager automatically manages the ETH/KRK trading pool:
- When staking activity is high (bullish signal), it concentrates liquidity for better trading
- When activity drops, it spreads liquidity wider for stability
- It tracks a **volume-weighted average price (VWAP)** to set the range
This happens automatically — no human decisions, no hidden operators. The rules are in the smart contract.
## Floor Price
Every KRK token is backed by ETH in the vault. The **floor price** is calculated as:
```
floor = ETH in vault ÷ total KRK supply
```
Your tokens can never be worth less than the floor. When someone buys KRK, more ETH enters the vault. When someone sells, ETH leaves. The system maintains balance.
## Summary
1. **Buy KRK** on Uniswap (Base)
2. **Stake** to earn from every trade
3. **Choose your rate** — higher = more protection, higher cost
4. **Earn passively** as the protocol generates trading activity
5. If challenged, you get **paid out at market value**
→ [Getting Started Guide](./getting-started.md)

View file

@ -1,325 +0,0 @@
# Mainnet VWAP Bootstrap Runbook
**Target chain:** Base (chain ID 8453)
## Why a manual process?
The VWAP bootstrap cannot be completed in a single Forge script execution. Two hard time-based delays imposed by the contracts make this impossible:
1. **300 s TWAP warm-up**`recenter()` reads the Uniswap V3 TWAP oracle and reverts with `"price deviated from oracle"` if the pool has fewer than 300 seconds of observation history. A pool created within the same broadcast has zero history.
2. **60 s recenter cooldown**`recenter()` enforces a per-call cooldown (`lastRecenterTime + 60 s`). The first and second recenters cannot share a single broadcast.
`DeployBase.sol` contains an inline bootstrap attempt that will always fail on a freshly-created pool. Follow this runbook instead.
---
## Prerequisites
```bash
# Required environment variables — set before starting
export BASE_RPC="https://mainnet.base.org" # or your preferred Base RPC
export DEPLOYER_KEY="0x<your-private-key>"
export BASESCAN_API_KEY="<your-api-key>"
# Populated after Step 1 (deploy)
export LM_ADDRESS="" # LiquidityManager proxy address
export KRAIKEN="" # Kraiken token address
export POOL="" # Uniswap V3 pool address
# Protocol constants (Base mainnet)
export WETH="0x4200000000000000000000000000000000000006"
export SWAP_ROUTER="0x2626664c2603336E57B271c5C0b26F421741e481" # Uniswap V3 SwapRouter02
export DEPLOYER_ADDRESS="$(cast wallet address --private-key $DEPLOYER_KEY)"
# Minimum ETH required in deployer wallet:
# gas for deploy (~0.05 ETH) + 0.01 ETH LM seed + 0.005 ETH seed buy
```
---
## Step 1 — Deploy contracts (pool init)
Run the mainnet deploy script. `DeployBase.sol` wraps the inline `recenter()` call in a try/catch, so if the pool is too fresh for the TWAP oracle the bootstrap is skipped with a warning and the deployment still succeeds. The deploy script then prints instructions directing you to complete the bootstrap manually.
```bash
cd onchain
forge script script/DeployBaseMainnet.sol \
--rpc-url $BASE_RPC \
--broadcast \
--verify \
--etherscan-api-key $BASESCAN_API_KEY \
--slow \
--private-key $DEPLOYER_KEY
```
> **Note:** If the script still aborts during simulation (e.g., due to an older version of `DeployBase.sol` without the try/catch), see [Troubleshooting](#troubleshooting) for how to separate the deploy from the bootstrap.
After the broadcast completes, record the addresses from the console output:
```bash
export LM_ADDRESS="0x..." # LiquidityManager address from deploy output
export KRAIKEN="0x..." # Kraiken address from deploy output
export POOL="0x..." # Uniswap V3 pool address from deploy output
```
Verify the pool exists and has been initialized:
```bash
cast call $POOL "slot0()" --rpc-url $BASE_RPC
# Returns: sqrtPriceX96, tick, ... (non-zero sqrtPriceX96 confirms initialization)
```
Record the block timestamp of pool creation:
```bash
export POOL_INIT_TS=$(cast block latest --rpc-url $BASE_RPC --field timestamp)
echo "Pool initialized at Unix timestamp: $POOL_INIT_TS"
echo "First recenter available after: $(( POOL_INIT_TS + 300 )) ($(date -d @$(( POOL_INIT_TS + 300 )) 2>/dev/null || date -r $(( POOL_INIT_TS + 300 )) 2>/dev/null))"
```
---
## Step 2 — Wait ≥ 300 s (TWAP warm-up)
The Uniswap V3 TWAP oracle must accumulate at least 300 seconds of observation history before `recenter()` can succeed. Do not proceed until 300 seconds have elapsed since pool initialization.
```bash
# Poll until 300 s have elapsed since pool creation
TARGET_TS=$(( POOL_INIT_TS + 300 ))
while true; do
NOW=$(cast block latest --rpc-url $BASE_RPC --field timestamp)
REMAINING=$(( TARGET_TS - NOW ))
if [ "$REMAINING" -le 0 ]; then
echo "TWAP warm-up complete. Proceeding to first recenter."
break
fi
echo "Waiting ${REMAINING}s more for TWAP warm-up..."
sleep 10
done
```
---
## Step 3 — Fund LiquidityManager and first recenter
Fund the LiquidityManager with the seed ETH it needs to place bootstrap positions, then call `recenter()` for the first time.
```bash
# Fund LiquidityManager (0.01 ETH minimum for bootstrap positions)
cast send $LM_ADDRESS \
--value 0.01ether \
--rpc-url $BASE_RPC \
--private-key $DEPLOYER_KEY
# Confirm balance
cast balance $LM_ADDRESS --rpc-url $BASE_RPC
```
```bash
# First recenter — places anchor, floor, and discovery positions
cast send $LM_ADDRESS \
"recenter()" \
--rpc-url $BASE_RPC \
--private-key $DEPLOYER_KEY
echo "First recenter complete."
```
Record the timestamp immediately after this call — the 60 s cooldown starts now:
```bash
export FIRST_RECENTER_TS=$(cast block latest --rpc-url $BASE_RPC --field timestamp)
echo "First recenter at Unix timestamp: $FIRST_RECENTER_TS"
echo "Second recenter available after: $(( FIRST_RECENTER_TS + 60 ))"
```
---
## Step 4 — Seed buy (generate non-zero anchor fee)
The VWAP bootstrap path in `recenter()` only records the price anchor when `ethFee > 0` (i.e., when the anchor position has collected a fee). Execute a small buy of KRAIKEN to generate that fee.
```bash
# Step 4a — Wrap ETH to WETH
cast send $WETH \
"deposit()" \
--value 0.005ether \
--rpc-url $BASE_RPC \
--private-key $DEPLOYER_KEY
# Step 4b — Approve SwapRouter to spend WETH
cast send $WETH \
"approve(address,uint256)" $SWAP_ROUTER 5000000000000000 \
--rpc-url $BASE_RPC \
--private-key $DEPLOYER_KEY
# Step 4c — Seed buy: swap 0.005 WETH → KRAIKEN via the 1 % pool
# SwapRouter02 exactInputSingle struct (7 fields — no deadline):
# tokenIn, tokenOut, fee, recipient, amountIn, amountOutMinimum, sqrtPriceLimitX96
cast send $SWAP_ROUTER \
"exactInputSingle((address,address,uint24,address,uint256,uint256,uint160))(uint256)" \
"($WETH,$KRAIKEN,10000,$DEPLOYER_ADDRESS,5000000000000000,0,0)" \
--rpc-url $BASE_RPC \
--private-key $DEPLOYER_KEY
echo "Seed buy complete. Anchor position has collected a fee."
```
Confirm the pool executed the swap (non-zero KRK balance in deployer wallet):
```bash
cast call $KRAIKEN "balanceOf(address)" $DEPLOYER_ADDRESS --rpc-url $BASE_RPC
# Should be > 0
```
---
## Step 5 — Wait ≥ 60 s (recenter cooldown)
```bash
TARGET_TS=$(( FIRST_RECENTER_TS + 60 ))
while true; do
NOW=$(cast block latest --rpc-url $BASE_RPC --field timestamp)
REMAINING=$(( TARGET_TS - NOW ))
if [ "$REMAINING" -le 0 ]; then
echo "Recenter cooldown elapsed. Proceeding to second recenter."
break
fi
echo "Waiting ${REMAINING}s more for recenter cooldown..."
sleep 5
done
```
---
## Step 6 — Second recenter (records VWAP anchor)
The second `recenter()` hits the bootstrap path inside `LiquidityManager`: `cumulativeVolume == 0` and `ethFee > 0`, so it records the VWAP price anchor and sets `cumulativeVolume > 0`, permanently closing the bootstrap window.
```bash
# LM_ADDRESS must already be set from Step 1.
# BootstrapVWAPPhase2.s.sol reads the broadcaster key from the .secret
# seed-phrase file in onchain/ (same as DeployBase.sol). Ensure that file
# is present; the --private-key CLI flag is NOT used by this script.
forge script script/BootstrapVWAPPhase2.s.sol \
--tc BootstrapVWAPPhase2 \
--rpc-url $BASE_RPC \
--broadcast
```
The script asserts `cumulativeVolume > 0` and will fail with an explicit message if the bootstrap did not succeed.
---
## Step 7 — Verify bootstrap success
```bash
# cumulativeVolume must be > 0
cast call $LM_ADDRESS "cumulativeVolume()" --rpc-url $BASE_RPC
# Expected: non-zero value
# VWAP should now reflect the seed buy price
cast call $LM_ADDRESS "getVWAP()" --rpc-url $BASE_RPC 2>/dev/null || \
echo "(getVWAP may not be a public function — check cumulativeVolume above)"
# Three positions should be in place
cast call $LM_ADDRESS "positions(0)" --rpc-url $BASE_RPC # floor
cast call $LM_ADDRESS "positions(1)" --rpc-url $BASE_RPC # anchor
cast call $LM_ADDRESS "positions(2)" --rpc-url $BASE_RPC # discovery
# LM should hold ETH / WETH for ongoing operations
cast balance $LM_ADDRESS --rpc-url $BASE_RPC
```
---
## Recovery from failed mid-sequence bootstrap
If the bootstrap fails partway through (e.g., the second `recenter()` in Step 6 reverts due to insufficient price movement / "amplitude not reached"), the LiquidityManager is left in a partially bootstrapped state:
- **Positions deployed** — the first `recenter()` placed anchor, floor, and discovery positions
- **`cumulativeVolume == 0`** — the VWAP anchor was never recorded
- **`feeDestination` set** — `DeployBase.sol` sets this before any recenter attempt
- **`recenter()` is permissionless** — no access control to revoke; anyone can call it
### Diagnosing the state
```bash
# Check if VWAP bootstrap completed (0 = not yet bootstrapped)
cast call $LM_ADDRESS "cumulativeVolume()(uint256)" --rpc-url $BASE_RPC
# Check current feeDestination
cast call $LM_ADDRESS "feeDestination()(address)" --rpc-url $BASE_RPC
# Check if feeDestination is locked (true = cannot be changed)
cast call $LM_ADDRESS "feeDestinationLocked()(bool)" --rpc-url $BASE_RPC
# Check if positions exist (non-zero liquidity = positions deployed)
cast call $LM_ADDRESS "positions(uint8)(int24,int24,uint128)" 1 --rpc-url $BASE_RPC
```
### Recovery steps
1. **Identify the failure cause** — check the revert reason from Step 6. Common causes:
- `"amplitude not reached."` — the seed buy did not move the price enough ticks for `recenter()` to accept the movement as significant
- `"price deviated from oracle"` — TWAP history is still insufficient
- `"recenter cooldown"` — 60 s has not elapsed since the last recenter
2. **Fix the root cause:**
- For amplitude issues: execute a larger seed buy (Step 4 with more ETH) to generate more price movement and anchor fees
- For TWAP issues: wait longer for oracle history to accumulate
- For cooldown: simply wait 60 s
3. **Retry the second recenter** — re-run Step 6 (`BootstrapVWAPPhase2.s.sol`) or call `recenter()` directly:
```bash
cast send $LM_ADDRESS "recenter()" --rpc-url $BASE_RPC --private-key $DEPLOYER_KEY
```
4. **Verify** — confirm `cumulativeVolume > 0` (Step 7)
5. **If `feeDestination` needs correction** (e.g., was set to the wrong address):
```bash
# Only works if feeDestinationLocked is false
cast send $LM_ADDRESS \
"setFeeDestination(address)" <CORRECT_FEE_DEST_ADDRESS> \
--rpc-url $BASE_RPC \
--private-key $DEPLOYER_KEY
```
### Automated recovery
A helper script automates the diagnosis and retry:
```bash
# Diagnose and retry bootstrap
scripts/recover-bootstrap.sh --rpc-url $BASE_RPC --private-key $DEPLOYER_KEY --lm $LM_ADDRESS
```
See `scripts/recover-bootstrap.sh --help` for all options.
---
## Troubleshooting
### `forge script` aborts before broadcast due to recenter() revert
Foundry simulates the entire `run()` function before broadcasting anything. If the inline bootstrap in `DeployBase.sol` causes the simulation to fail, no transactions are broadcast.
**Workaround:** Comment out the bootstrap block in `DeployBase.sol` locally (lines 101145, from `// =====================================================================` through `seedSwapper.executeSeedBuy{ value: SEED_SWAP_ETH }(sender);`) before running the deploy script, then restore it afterward. The bootstrap is then performed manually using Steps 36 above.
### `recenter()` reverts with "price deviated from oracle"
The pool has insufficient TWAP history. Wait longer and retry. At least one block must have been produced with the pool at its initialized price before the 300 s counter begins.
### `recenter()` reverts with "cooldown"
The 60 s cooldown has not elapsed since the last recenter. Wait and retry.
### Seed buy produces zero KRK
The pool may have no in-range liquidity (first recenter did not place positions successfully). Check positions via `cast call $LM_ADDRESS "positions(1)"` and re-run Step 3 if the anchor position is empty.
### BootstrapVWAPPhase2 fails with "cumulativeVolume is still 0"
The anchor position collected no fees — either the seed buy was too small to generate a fee, or the swap routed through a different pool. Repeat Step 4 with a larger `amountIn` (e.g., `0.01 ether` / `10000000000000000`) and re-run Step 56.

44
docs/podman.md Normal file
View file

@ -0,0 +1,44 @@
# Podman Staging Environment
The Podman stack mirrors `scripts/dev.sh` using long-lived containers. Every boot spins up a fresh Base Sepolia fork, redeploys contracts, seeds liquidity, and launches the live-reload services behind Caddy on port 80.
## Service Topology
- `anvil` Base Sepolia fork with optional mnemonic from `onchain/.secret.local`
- `bootstrap` one-shot job running `DeployLocal.sol`, seeding liquidity, priming blocks, and writing shared env files
- `ponder` `npm run dev` for the indexer (port 42069 inside the pod)
- `frontend` Vite dev server for `web-app` (port 5173 inside the pod)
- `txn-bot` automation loop plus Express status API (port 43069 inside the pod)
- `caddy` front door at `http://<host>:80`, routing `/api/graphql`, `/health`, `/api/rpc`, and `/api/txn` to the internal services
All containers mount the repository so code edits hot-reload exactly as the local script. Named volumes keep `node_modules` caches between restarts.
## Prerequisites
- Podman 4.x (rootless recommended)
- `podman-compose`
## Launching
```bash
podman-compose -f podman-compose.yml build
podman-compose -f podman-compose.yml up
```
- First run takes several minutes while Foundry installs deps, deploys contracts, and runs the seeding transactions.
- Use `podman-compose down` to stop. Bring-up always redeploys and rewrites `services/ponder/.env.local` plus `tmp/podman/txnBot.env`.
### Access Points (via Caddy)
- Frontend: `http://<host>/`
- GraphQL: `http://<host>/api/graphql`
- RPC passthrough: `http://<host>/api/rpc`
- Txn bot status: `http://<host>/api/txn/status`
## Configuration Knobs
Set environment variables before `podman-compose up`:
- `FORK_URL` Anvil upstream RPC (defaults to `https://sepolia.base.org`)
- `DEPLOYER_PK`, `DEPLOYER_ADDR` override deployer wallet; otherwise derived from `.secret.local` or Foundry defaults
- `TXNBOT_PRIVATE_KEY`, `TXNBOT_ADDRESS`, `TXNBOT_FUND_VALUE` customise bot signer and funding
Edit `containers/Caddyfile` if you need different routes or ports.
## Known Limitations
- State is ephemeral; every restart wipes the fork and redeploys contracts.
- Processes run in dev/watch mode (`npm run dev`), so staging traffic is not production hardened.
- Secrets live in env files inside the repo mount because no external secret store is wired in.

View file

@ -1,90 +0,0 @@
# Technical Architecture
## System Overview
KrAIken consists of three on-chain contracts, a real-time indexer, and two web frontends.
```
┌──────────────┐ ┌──────────────┐ ┌──────────────────────┐
│ Kraiken │────▶│ Stake │ │ LiquidityManager │
│ (ERC20) │ │ (Staking) │ │ (Pool Management) │
└──────────────┘ └──────────────┘ └──────────────────────┘
│ │ │
└────────────────────┼────────────────────────┘
┌───────▼────────┐
│ Ponder Indexer │
│ (GraphQL API) │
└───────┬────────┘
┌─────────────┼─────────────┐
│ │
┌───────▼────────┐ ┌───────▼────────┐
│ Landing Page │ │ Staking App │
│ (Vue 3/Vite) │ │ (Vue 3/Vite) │
└────────────────┘ └────────────────┘
```
## Smart Contracts
### Kraiken.sol (ERC20 Token)
- Standard ERC20 with controlled minting by LiquidityManager
- 20% of supply reserved for staking pool
- Min stake fraction: 1/3000 of total supply (~399 KRK at current supply)
- Tracks `previousTotalSupply` for staking calculations
- Version field for indexer compatibility
### Stake.sol (Staking Positions)
- Creates/manages staking positions with self-assessed tax rates
- 30 discrete tax rate tiers: 1%, 3%, 5%, 8%, 12%, ... up to 97%
- Snatching: higher tax rate can displace lower positions
- 3-day minimum hold (`TAX_FLOOR_DURATION`) before snatch
- Position payout at market value when snatched or unstaked
### LiquidityManager.sol (Pool Management)
- Manages Uniswap V3 concentrated liquidity position
- Recenters liquidity based on VWAP and market conditions
- Emits `EthAbundance`, `EthScarcity`, `Recentered` events
- Optimizer V3: reads staking sentiment to adjust parameters
## Indexer (Ponder)
[Ponder](https://ponder.sh) indexes on-chain events into PostgreSQL via GraphQL:
- **Stats**: Protocol-wide metrics (supply, reserves, fees)
- **Positions**: Individual staking positions with status
- **Holders**: Token balances with cost basis tracking
- **Recenters**: Liquidity management history
- **Ring Buffer**: 7-day hourly snapshots of ETH reserve, mints, burns, tax
### Key Endpoints
- GraphQL: `http://localhost:42069` (proxied at `/api/graphql`)
- Health: `http://localhost:42069/health`
- Ready: `http://localhost:42069/ready` (200 when historical sync complete)
## Web Frontends
### Landing Page (`/`)
- Marketing + protocol health dashboard
- LiveStats component with real-time metrics
- Wallet connect + holder card for returning users
- Three variants: defensive, offensive, mixed
### Staking App (`/app/`)
- Full staking dashboard
- Position management (stake, unstake, adjust tax)
- Wallet P&L with cost basis tracking
- Charts and protocol statistics
### Shared Package (`packages/web3/`)
- `createHarbConfig()` — wagmi config with Base chain + connectors
- `useTokenBalance` composable
- Re-exports of wagmi composables for consistent imports
## Infrastructure
- **Chain**: Base (Ethereum L2), chainId 8453
- **Local dev**: Anvil fork of Base Sepolia (chainId 31337)
- **Proxy**: Caddy reverse proxy on port 8081
- **CI**: Woodpecker CI with pre-built Docker images
- **Source**: [codeberg.org/johba/harb](https://codeberg.org/johba/harb)

View file

@ -1,288 +0,0 @@
# KRAIKEN Mainnet Deployment Runbook
**Target chain:** Base (L2)
**Contract version:** V2 (OptimizerV3 w/ directional VWAP)
---
## 1. Pre-Deployment Checklist
- [ ] All tests pass: `cd onchain && forge test`
- [ ] Gas snapshot baseline: `forge snapshot`
- [ ] Security review complete (see `analysis/SECURITY_REVIEW.md`)
- [ ] Storage layout verified for UUPS upgrade (see `analysis/STORAGE_LAYOUT.md`)
- [ ] Floor ratchet mitigation status confirmed (branch `fix/floor-ratchet`)
- [ ] Multisig wallet ready for `feeDestination` (Gnosis Safe on Base)
- [ ] Deployer wallet funded with sufficient ETH for gas (~0.05 ETH)
- [ ] LiquidityManager funding wallet ready (initial ETH seed for pool positions)
- [ ] `.secret` seed phrase file present in `onchain/` (deployer account)
- [ ] Base RPC endpoint configured and tested
- [ ] Etherscan/Basescan API key ready for contract verification
- [ ] kraiken-lib version updated: `COMPATIBLE_CONTRACT_VERSIONS` includes `2`
---
## 2. Contract Deployment Order
All contracts are deployed in a single broadcast transaction via `DeployBaseMainnet.sol`:
```
1. Kraiken token (ERC20 + ERC20Permit)
2. Stake contract (Kraiken address, feeDestination)
3. Kraiken.setStakingPool(Stake)
4. Uniswap V3 Pool (create or use existing, FEE=10000)
5. Pool initialization (1 cent starting price)
6. OptimizerV3 implementation + ERC1967Proxy
7. LiquidityManager (factory, WETH, Kraiken, OptimizerProxy)
8. LiquidityManager.setFeeDestination(multisig)
9. Kraiken.setLiquidityManager(LiquidityManager)
```
### Deploy Command
```bash
cd onchain
# Verify configuration first
cat script/DeployBaseMainnet.sol # Check feeDest, weth, v3Factory
# Dry run (no broadcast)
forge script script/DeployBaseMainnet.sol \
--rpc-url $BASE_RPC \
--sender $(cast wallet address --mnemonic "$(cat .secret)")
# Live deployment
forge script script/DeployBaseMainnet.sol \
--rpc-url $BASE_RPC \
--broadcast \
--verify \
--etherscan-api-key $BASESCAN_API_KEY \
--slow
```
**Critical:** The `--slow` flag submits transactions one at a time, waiting for confirmation. This prevents nonce issues on Base.
### Record Deployment Addresses
After deployment, save all addresses from console output:
```bash
# Update deployments file
cat >> deployments-mainnet.json << 'EOF'
{
"chain": "base",
"chainId": 8453,
"kraiken": "0x...",
"stake": "0x...",
"pool": "0x...",
"liquidityManager": "0x...",
"optimizerProxy": "0x...",
"optimizerImpl": "0x...",
"feeDestination": "0x...",
"deployer": "0x...",
"deployedAt": "2026-XX-XX",
"txHash": "0x..."
}
EOF
```
---
## 3. Post-Deployment Setup
### 3.1 Fund LiquidityManager
The LM needs ETH to create initial positions:
```bash
# Send ETH to LiquidityManager (unwrapped — it will wrap to WETH internally)
cast send $LIQUIDITY_MANAGER --value 10ether \
--rpc-url $BASE_RPC \
--mnemonic "$(cat .secret)"
```
### 3.2 Trigger First Recenter
`recenter()` is permissionless — any address may call it. The 60-second cooldown (`MIN_RECENTER_INTERVAL`) and TWAP oracle check are always enforced.
```bash
# Wait for pool to accumulate some TWAP history (~5 minutes of trades)
# Anyone can trigger the first recenter; txnBot will take over ongoing calls
cast send $LIQUIDITY_MANAGER "recenter()" \
--rpc-url $BASE_RPC \
--from $TXNBOT_ADDRESS
```
### 3.4 Configure txnBot
Update `services/txnBot/` configuration for Base mainnet:
- Set `LIQUIDITY_MANAGER` address
- Set `KRAIKEN` address
- Set RPC to Base mainnet
- Deploy txnBot service
### 3.5 Configure Ponder Indexer
```bash
# Update kraiken-lib/src/version.ts
export const COMPATIBLE_CONTRACT_VERSIONS = [2];
# Update Ponder config for Base mainnet addresses
# Set PONDER_NETWORK=BASE in environment
```
### 3.6 Update Frontend
- Update contract addresses in web-app configuration
- Update kraiken-lib ABIs: `cd onchain && forge build` then rebuild kraiken-lib
- Deploy frontend to production
---
## 4. Optimizer Upgrade Procedure
If upgrading an existing Optimizer proxy to OptimizerV3:
```bash
cd onchain
# Set proxy address
export OPTIMIZER_PROXY=0x...
# Dry run
forge script script/UpgradeOptimizer.sol \
--rpc-url $BASE_RPC
# Execute upgrade
forge script script/UpgradeOptimizer.sol \
--rpc-url $BASE_RPC \
--broadcast \
--verify \
--etherscan-api-key $BASESCAN_API_KEY
# Verify post-upgrade
cast call $OPTIMIZER_PROXY "getLiquidityParams()" --rpc-url $BASE_RPC
```
**Expected output:** Bear-mode defaults (CI=0, AS=0.3e18, AW=100, DD=0.3e18) since staking will be <91%.
---
## 5. Verification Steps
Run these checks after deployment to confirm everything is wired correctly:
```bash
# 1. Kraiken token
cast call $KRAIKEN "VERSION()" --rpc-url $BASE_RPC # Should return 2
cast call $KRAIKEN "peripheryContracts()" --rpc-url $BASE_RPC # LM + Stake addresses
# 2. LiquidityManager
cast call $LM "feeDestination()" --rpc-url $BASE_RPC # Should be multisig
cast call $LM "lastRecenterTime()" --rpc-url $BASE_RPC # Should be non-zero after first recenter
cast call $LM "positions(0)" --rpc-url $BASE_RPC # Floor position (after recenter)
cast call $LM "positions(1)" --rpc-url $BASE_RPC # Anchor position
cast call $LM "positions(2)" --rpc-url $BASE_RPC # Discovery position
# 3. OptimizerV3 (through proxy)
cast call $OPTIMIZER "getLiquidityParams()" --rpc-url $BASE_RPC
# 4. Pool state
cast call $POOL "slot0()" --rpc-url $BASE_RPC # Current tick, price
cast call $POOL "liquidity()" --rpc-url $BASE_RPC # Total liquidity
# 5. Stake contract
cast call $STAKE "nextPositionId()" --rpc-url $BASE_RPC # Should be 0 initially
# 6. ETH balance
cast balance $LM --rpc-url $BASE_RPC # Should show funded amount
```
---
## 6. Emergency Procedures
### 6.1 Pause Recentering
**NOTE:** `recenter()` is permissionless — there is no access-control switch to block it. The only mechanism that prevents a recenter is the 60-second `MIN_RECENTER_INTERVAL` cooldown and the TWAP oracle check. There is no admin function to revoke or grant access.
In an attack scenario the most effective response is to upgrade or replace the contract (see §6.3 / §6.4). Existing positions remain in place and continue earning fees regardless of recenter activity.
### 6.2 Upgrade Optimizer to Safe Defaults
Deploy a minimal "safe" optimizer that always returns bear parameters:
```bash
# Deploy SafeOptimizer with hardcoded bear params
# Upgrade proxy to SafeOptimizer
OPTIMIZER_PROXY=$OPTIMIZER forge script script/UpgradeOptimizer.sol \
--rpc-url $BASE_RPC --broadcast
```
### 6.3 Emergency Parameter Override
If the optimizer needs temporary override, deploy a new implementation with hardcoded safe parameters:
- CI=0, AS=30% (0.3e18), AW=100, DD=0.3e18 (bear defaults)
- These were verified safe across all 1050 parameter sweep combinations
### 6.4 Rollback Plan
**There is no rollback for deployed contracts.** Mitigation options:
- Upgrade optimizer proxy to revert to V1/V2 logic
- Revoke recenter access to freeze positions
- The LiquidityManager itself is NOT upgradeable (by design — immutable control)
- In worst case: deploy entirely new contract set, migrate liquidity
### 6.5 Known Attack Response: Floor Ratchet
If floor ratchet extraction is detected (rapid recenters + floor tick creeping toward current price):
1. **Immediately** upgrade the optimizer to safe bear-mode defaults (§6.2) — this maximises floor distance (AW=100 → 7000-tick clearance) and makes ratchet extraction significantly harder while a patched LiquidityManager is prepared. Note: there is no access-control switch on `recenter()`; the 60s cooldown is the only rate limiter
2. Assess floor position state via `positions(0)`
3. Deploy patched LiquidityManager if fix is ready
4. Current mitigation: bear-mode parameters (AW=100) create 7000-tick floor distance, making ratchet extraction significantly harder
---
## 7. Monitoring Setup
### On-Chain Monitoring
Track these metrics via Ponder or direct RPC polling:
| Metric | How | Alert Threshold |
|--------|-----|-----------------|
| Floor tick distance | `positions(0).tickLower - currentTick` | < 2000 ticks |
| Recenter frequency | Count `recenter()` calls per hour | > 10/hour |
| LM ETH balance | `address(LM).balance + WETH.balanceOf(LM)` | < 1 ETH (most ETH is in pool positions) |
| VWAP drift | `getVWAP()` vs current price | > 50% divergence |
| Optimizer mode | `getLiquidityParams()` return values | Unexpected bull in low-staking |
| Fee revenue | WETH transfers to feeDestination | Sudden drop to 0 |
### Off-Chain Monitoring
- txnBot health: `GET /api/txn/status` — should return healthy
- Ponder indexing: `GET /api/graphql` — query `stats` entity
- Frontend version check: `useVersionCheck()` composable validates contract VERSION
### Alerting Triggers
1. **Critical:** Floor position liquidity = 0 (no floor protection)
2. **Critical:** recenter() reverts for > 1 hour
3. **High:** > 20 recenters in 1 hour (potential manipulation)
4. **Medium:** VWAP compression triggered (high cumulative volume)
5. **Low:** Optimizer returns bull mode (verify staking metrics justify it)
---
## 8. Deployment Timeline
| Step | Duration | Dependency |
|------|----------|------------|
| Deploy contracts | ~2 min | Funded deployer wallet |
| Verify on Basescan | ~5 min | Deployment complete |
| Fund LiquidityManager | ~1 min | Deployment complete |
| Wait for TWAP history | ~5-10 min | Pool initialized |
| First recenter | ~1 min | TWAP history accumulated |
| Deploy txnBot | ~5 min | Addresses configured |
| Deploy Ponder | ~10 min | Addresses + kraiken-lib updated |
| Deploy frontend | ~5 min | Ponder running |
| **Total** | **~30-40 min** | |

View file

@ -1,134 +0,0 @@
# Docker Development Environment
The Docker stack powers `scripts/dev.sh` using containerized services. Every boot spins up a fresh Base Sepolia fork, redeploys contracts, seeds liquidity, and launches the live-reload services behind Caddy on port 8081.
## Service Topology
- `anvil` Base Sepolia fork with optional mnemonic from `onchain/.secret.local`
- `bootstrap` one-shot job running `DeployLocal.sol`, seeding liquidity, priming blocks, and writing shared env files (uses `scripts/bootstrap-common.sh`)
- `postgres` PostgreSQL 16 database for Ponder indexer state
- `ponder` `npm run dev` for the indexer (port 42069)
- `webapp` Vite dev server for `web-app` (port 5173)
- `landing` Vite dev server for landing page (port 5174)
- `txn-bot` automation loop plus Express status API (port 43069)
- `otterscan` block explorer UI (port 5100)
- `caddy` reverse proxy at `http://localhost:8081`, routing `/app/` → webapp, `/api/graphql` → ponder, `/api/rpc` → anvil, `/` → landing
All containers mount the repository so code edits hot-reload exactly as the local script. Named volumes keep `node_modules` caches between restarts.
## Prerequisites
### Linux
```bash
# Install Docker Engine
curl -fsSL https://get.docker.com | sh
sudo usermod -aG docker $USER
# Logout and login again for group changes to take effect
```
### Mac
```bash
# Install Colima (open-source Docker Desktop alternative)
brew install colima docker docker-compose
# Start Colima VM with recommended resources
colima start --cpu 4 --memory 8 --disk 100
# Verify installation
docker ps
```
## Launching
**Recommended**: Use the helper script
```bash
./scripts/dev.sh start
```
This will:
1. Build kraiken-lib
2. Start Anvil (Base Sepolia fork)
3. Deploy contracts via bootstrap
4. Start Ponder (indexes events)
5. Start web-app, landing, txn-bot
6. Start Caddy reverse proxy on port 8081
**Startup time**: ~6 minutes on first run (includes Ponder indexing 300+ blocks)
**Manual approach** (not recommended):
```bash
docker compose up -d
```
**Stopping the stack:**
```bash
./scripts/dev.sh stop
# or
docker compose down
```
**Quick restarts for development:**
- `./scripts/dev.sh restart --light` - Fast restart (~10-20s): only webapp + txnbot, preserves Anvil/Ponder state. **Use for frontend changes.**
- `./scripts/dev.sh restart --full` - Full restart (~6 min): redeploys contracts, fresh state. **Use for contract changes.**
**Important**: Every full restart redeploys contracts and rewrites `services/ponder/.env.local` and `tmp/containers/txnBot.env`.
### Access Points (via Caddy on port 8081)
**For reviewing code changes in your browser:**
- Landing page: `http://localhost:8081/` (marketing site)
- Web-app: `http://localhost:8081/app/` (staking interface - **use this for testing**)
- GraphQL Playground: `http://localhost:8081/api/graphql`
- TxnBot status: `http://localhost:8081/api/txn/status`
**Direct RPC access:**
- Anvil RPC: `http://localhost:8081/api/rpc` (or `http://localhost:8545` directly)
**Hot reload workflow:**
1. Start stack: `./scripts/dev.sh start`
2. Open `http://localhost:8081/app/` in your browser
3. Edit files in `web-app/src/` - changes appear instantly (Vite HMR)
4. Edit files in `landing/src/` - changes appear on `http://localhost:8081/`
5. Edit smart contracts in `onchain/src/` - requires `./scripts/dev.sh restart --full`
## Configuration Knobs
Set environment variables before `docker-compose up`:
- `FORK_URL` Anvil upstream RPC (defaults to `https://sepolia.base.org`)
- `DEPLOYER_PK`, `DEPLOYER_ADDR` override deployer wallet; otherwise derived from `.secret.local` or Foundry defaults
- `TXNBOT_PRIVATE_KEY`, `TXNBOT_ADDRESS`, `TXNBOT_FUND_VALUE` customise bot signer and funding
Edit `containers/Caddyfile` if you need different routes or ports.
## Known Limitations
- State is ephemeral; every restart wipes the fork and redeploys contracts.
- Processes run in dev/watch mode (`npm run dev`), so staging traffic is not production hardened.
- Secrets live in env files inside the repo mount because no external secret store is wired in.
## Troubleshooting
### Mac: "Cannot connect to Docker daemon"
```bash
# Ensure Colima is running
colima status
colima start
# Verify Docker can connect
docker ps
```
### Permission errors on Linux
```bash
# Add your user to the docker group
sudo usermod -aG docker $USER
# Logout and login again, or use:
newgrp docker
```
### Port conflicts
If you see "port already in use" errors:
```bash
# Check what's using the port
lsof -i :8081 # or :8545, :5173, etc.
# Stop conflicting services or change ports in docker-compose.yml
```

View file

@ -1,83 +0,0 @@
# Staking Mechanics
## Tax Rates
Staking uses a **self-assessed tax** mechanism (Harberger Tax). You choose what yearly rate you're willing to pay. This creates a continuous auction for staking slots.
### Rate Tiers
There are 30 discrete tax rates (percentages are yearly):
```
1%, 3%, 5%, 8%, 12%, 18%, 24%, 30%, 40%, 50%,
60%, 80%, 100%, 130%, 180%, 250%, 320%, 420%, 540%, 700%,
920%, 1200%, 1600%, 2000%, 2600%, 3400%, 4400%, 5700%, 7500%, 9700%
```
Rates are discrete (not continuous) to prevent micro-increment griefing.
### Tax Calculation
Tax accrues continuously from the moment you stake:
```
tax_owed = (staked_amount × tax_rate × time_held) / (365 days × 100)
```
Tax is paid when you:
- Unstake (deducted from payout)
- Get snatched (deducted from compensation)
- Manually pay via the dashboard
## Snatching (Position Challenges)
Anyone can take your staking slots by committing to a higher tax rate.
### Rules
1. **Higher rate required**: The challenger must use a strictly higher tax rate tier
2. **3-day minimum hold**: Positions are protected for 72 hours after creation
3. **Full compensation**: The snatched owner receives market value of their position minus accrued tax
4. **Discrete tiers only**: You can't snatch by increasing the rate by 0.01% — you must jump to the next tier
### What the snatched owner receives
```
payout = (shares / total_shares) × current_total_supply - tax_owed
```
The payout reflects the current token price, not the entry price. If the protocol grew, you get more back than you put in.
## Staking Pool
The staking pool holds 20% of all KRK supply. When new tokens are minted (from buys), stakers receive a proportional share. When tokens are burned (from sells), the pool shrinks proportionally.
### Owner Slots
- Total: 20,000 slots (representing 20% of supply)
- Your slots = your percentage × 20,000
- 1,000 slots = 1% of the staking pool
### Minimum Stake
To prevent fragmentation, there's a minimum stake:
```
min_stake = total_supply / 3000
```
At ~1.2M total supply, this is approximately 399 KRK.
## Adjusting Your Rate
You can change your tax rate on an existing position:
- **Increasing**: Takes effect immediately, extends snatch protection
- **Decreasing**: Takes effect after a delay to prevent gaming
## Strategy Guide
| Goal | Recommended Rate | Why |
|------|-----------------|-----|
| Long-term earning | Low (1-8%) | Cheap to hold, accept challenge risk |
| Defensive holding | Medium (18-40%) | Balance of cost and protection |
| Aggressive accumulation | High (60%+) | Hard to challenge, but expensive |
| Short-term flip | Lowest available | Minimize holding cost |

View file

@ -1,60 +0,0 @@
# Tokenomics
## KRK Token
- **Standard**: ERC20 on Base (Ethereum L2)
- **Supply**: Dynamic (minted on buys, burned on sells)
- **Backing**: Every KRK token is backed by ETH in the trading vault
## ETH Reserve & Floor Price
The protocol maintains an ETH reserve in a Uniswap V3 concentrated liquidity position. This creates a floor price:
```
floor_price = ETH_reserve / total_KRK_supply
```
**Key property**: The floor price can only go up (in ETH terms) because:
- Buys add ETH to the reserve and mint KRK at market price (above floor)
- Sells remove KRK from supply and return ETH at market price
- Trading fees from the pool add to the reserve without minting new tokens
## Supply Mechanics
### Minting (on buy)
When someone buys KRK on Uniswap:
1. ETH enters the pool
2. KRK is minted at market price
3. 20% of new tokens go to the staking pool (for stakers)
4. 80% goes to the buyer
### Burning (on sell)
When someone sells KRK:
1. KRK is burned
2. ETH leaves the pool at market price
3. The staking pool burns proportionally
## Liquidity Management
The LiquidityManager positions liquidity in a concentrated range around the current price:
### Modes
- **Scarcity** (bearish signal): Wide range, conservative positioning
- **Abundance** (bullish signal): Narrow range, aggressive fee capture
### Signals
The optimizer reads staking activity as a sentiment indicator:
- High staking ratio + low tax rates = genuine confidence → Bull mode
- Dropping staking or rising tax rates = uncertainty → Bear mode
### VWAP Tracking
The system tracks a volume-weighted average price (VWAP) to set liquidity ranges. This creates a "mirror floor" — a second price support level based on recent trading history.
## Fee Generation
Trading activity generates fees from the Uniswap V3 position. These fees accrue to the ETH reserve, increasing the floor price for all holders.
The fee rate depends on:
- Trading volume
- Liquidity concentration (narrower range = more fees per trade)
- Pool fee tier (1% on the KRK/WETH pair)

View file

@ -1,41 +0,0 @@
# Testing
## Contract Tests (Foundry)
Run inside `onchain/`:
```bash
forge build # compile contracts
forge test # run unit + fork tests
forge snapshot # gas snapshot
```
## Fuzzing
Scripts under `onchain/analysis/` generate replayable scenarios:
```bash
./analysis/run-fuzzing.sh [optimizer] debugCSV
```
## Integration Testing
After the stack boots via `dev.sh`:
- Anvil logs: check for revert errors
- Ponder GraphQL: `http://localhost:8081/api/graphql`
- txnBot health: `http://localhost:8081/api/txn/status`
## E2E Tests (Playwright)
Full-stack tests in `tests/e2e/` verify complete user journeys (mint ETH → swap KRK → stake).
```bash
npm run test:e2e # from repo root
```
- Tests use a mocked wallet provider with Anvil accounts.
- In CI, the Woodpecker `e2e.yml` pipeline runs these against pre-built service images.
- See [docs/ci-pipeline.md](ci-pipeline.md) for CI-specific E2E details.
## Version Validation System
The stack enforces version compatibility across contracts, indexer, and frontend:
- **Contract VERSION**: `Kraiken.sol` exposes a `VERSION` constant (currently v2) that must be incremented for breaking changes to TAX_RATES, events, or core data structures.
- **Ponder Validation**: On startup, Ponder reads the contract VERSION and validates against `COMPATIBLE_CONTRACT_VERSIONS` in `kraiken-lib/src/version.ts`. Fails hard (exit 1) on mismatch to prevent indexing wrong data.
- **Frontend Check**: Web-app validates `KRAIKEN_LIB_VERSION` at runtime (currently placeholder; future: query Ponder GraphQL for full 3-way validation).
- **CI Enforcement**: Woodpecker `release.yml` pipeline validates that contract VERSION matches `COMPATIBLE_CONTRACT_VERSIONS` before release.
- See `VERSION_VALIDATION.md` (repo root) for complete architecture, workflows, and troubleshooting.

View file

@ -1,58 +0,0 @@
import tseslint from '@typescript-eslint/eslint-plugin';
import tsparser from '@typescript-eslint/parser';
export default [
{
name: 'tests/files-to-lint',
files: ['tests/**/*.ts', 'scripts/harb-evaluator/**/*.ts'],
languageOptions: {
parser: tsparser,
parserOptions: {
ecmaVersion: 2022,
sourceType: 'module',
},
globals: {
process: 'readonly',
console: 'readonly',
fetch: 'readonly',
setTimeout: 'readonly',
Date: 'readonly',
Promise: 'readonly',
},
},
plugins: {
'@typescript-eslint': tseslint,
},
rules: {
'@typescript-eslint/no-explicit-any': 'error',
'@typescript-eslint/no-unused-vars': [
'error',
{
argsIgnorePattern: '^_',
varsIgnorePattern: '^_',
caughtErrorsIgnorePattern: '^_',
},
],
},
},
{
name: 'arch/no-fixed-delays',
files: ['tests/**/*.ts', 'scripts/harb-evaluator/**/*.ts'],
rules: {
'no-restricted-syntax': [
'error',
{
selector: "CallExpression[callee.property.name='waitForTimeout']",
message:
'[BANNED] waitForTimeout is a fixed delay. → Subscribe to events instead (eth_newFilter for on-chain, waitForSelector/waitForURL for DOM). → Polling with timeout is acceptable only if no event source exists. → See AGENTS.md #Engineering Principles.',
},
{
selector:
"NewExpression[callee.name='Promise'] > ArrowFunctionExpression CallExpression[callee.name='setTimeout']",
message:
'[BANNED] Promise+setTimeout sleep pattern. → Use event subscription or polling with timeout instead. → See AGENTS.md #Engineering Principles.',
},
],
},
},
];

View file

@ -1,507 +0,0 @@
# Evidence Directory
Machine-readable process results for the KRAIKEN optimizer pipeline. All formulas
(evolution, red-team, holdout, user-test) write structured JSON here.
## Purpose
- **Planner input** — the planner reads these files to decide next actions
(e.g. "last red-team showed IL vulnerability → trigger evolution").
- **Diffable history**`git log evidence/` shows how metrics change over time.
- **Permanent record** — separate from `tmp/` which is ephemeral.
## Directory Layout
```
evidence/
evolution/
YYYY-MM-DD.json # run params, generation stats, best fitness, champion file
red-team/
YYYY-MM-DD.json # per-attack results, floor held/broken, ETH extracted
holdout/
YYYY-MM-DD-prNNN.json # per-scenario pass/fail, gate decision
user-test/
YYYY-MM-DD.json # per-persona reports, screenshot refs, friction points
resources/
YYYY-MM-DD.json # disk, RAM, API call counts, budget burn, CI queue depth
protocol/
YYYY-MM-DD.json # TVL, accumulated fees, position count, rebalance frequency
```
## Delivery Pattern
Every formula follows the same three-step pattern:
1. **Evidence file** → committed to `evidence/` on main
2. **Git artifacts** (new code, attack vectors, evolved programs) → PR
3. **Human summary** → issue comment with key metrics + link to evidence file
---
## Fee-Income Calculation Model
This section documents how `delta_bps` values in red-team and holdout evidence files
are derived, so that recorded values can be independently verified.
### Measurement tool
`delta_bps` is computed from two snapshots of **LM total ETH** taken by
[`onchain/script/LmTotalEth.s.sol`](../onchain/script/LmTotalEth.s.sol):
```
lm_total_eth = lm.balance (free ETH)
+ WETH.balanceOf(lm) (free WETH)
+ Σ positionEthPrincipal(stage) for stage ∈ {FLOOR, ANCHOR, DISCOVERY}
```
Each position's ETH principal is calculated via `LiquidityAmounts.getAmountsForLiquidity`
at the pool's current `sqrtPriceX96`. Only the WETH side of each position is summed;
the KRK side is excluded.
### What is and is not counted
| Counted | Not counted |
|---------|-------------|
| Free native ETH on the LM contract | KRK balance (free or in positions) |
| Free WETH (ERC-20) on the LM contract | Uncollected fees still inside Uni V3 positions |
| ETH-side principal of all 3 positions | KRK fees transferred to `feeDestination` |
**Key consequence:** Uncollected fees accrued inside Uniswap V3 positions are invisible
to `LmTotalEth` until a `recenter()` call executes `pool.burn` + `pool.collect`, which
converts them into free WETH on the LM contract (or transfers them to `feeDestination`).
A `recenter()` between the two snapshots materializes these fees into the measurement.
### `delta_bps` formula
```
delta_bps = (lm_eth_after lm_eth_before) / lm_eth_before × 10_000
```
Where `lm_eth_before` and `lm_eth_after` are `LmTotalEth` readings taken before and
after the attack sequence. Each attack is snapshot-isolated (Anvil snapshot → execute →
measure → revert), so per-attack `delta_bps` values are independent.
### Components that drive `delta_bps`
A round-trip trade (buy KRK with ETH, then sell KRK back for ETH) through the LM's
dominant positions produces a positive `delta_bps` from three sources:
1. **Pool fee income (1% per leg).** The WETH/KRK pool charges a 1% fee (`FEE = 10_000`
in `LiquidityManager.sol`). On a simple round trip this contributes ~2% of volume.
However, fees accrue as uncollected position fees and only become visible after
`recenter()` materializes them. If no recenter occurs between snapshots, fee income
is partially hidden (reflected only indirectly through reduced trade output).
2. **Concentrated-liquidity slippage.** The LM's three-position strategy concentrates
most liquidity in narrow tick ranges. Trades that exceed the depth of a position
range push through progressively thinner liquidity, causing super-linear slippage.
The attacker receives fewer tokens per unit of input on each marginal unit. This
slippage transfers value to the LM's positions as increased ETH principal.
3. **Recenter repositioning gain.** When `recenter()` is called between trade legs:
- All three positions are burned and fees collected.
- New positions are minted at the current price.
- Any accumulated fees (WETH portion) become free WETH and are redeployed as new
position liquidity. KRK fees are sent to `feeDestination`.
- The repositioned liquidity changes the tick ranges the next trade interacts with.
### Why `delta_bps` is non-linear
A naive estimate of `delta_bps ≈ volume × 1% × 2 legs / lm_eth_before × 10_000`
underestimates the actual value for large trades because:
- **Slippage dominates at high volume.** When trade volume approaches or exceeds the
ETH depth of the active positions, the price moves through the entire concentrated
range and into thin or empty ticks. The slippage loss to the attacker (= gain to the
LM) grows super-linearly with volume.
- **Multi-recenter compounding.** Strategies that call `recenter()` between sub-trades
materialize intermediate fees and reposition liquidity at a new price. Subsequent
trades pay fees at the new tick ranges, compounding the total fee capture.
- **KRK fee exclusion.** KRK fees collected during `recenter()` are transferred to
`feeDestination` and excluded from `LmTotalEth`. This means the measurement captures
the ETH-side gain but not the KRK-side gain — `delta_bps` understates total protocol
revenue.
### Fee destination behaviour
When `feeDestination` is `address(0)` or `address(this)` (the LM contract itself),
fees are **not** transferred out — they remain as deployable liquidity on the LM.
In this configuration, materialized WETH fees increase `lm_total_eth` directly. When
`feeDestination` is an external address, WETH fees are transferred out and do **not**
contribute to `lm_total_eth`. The red-team test environment uses `feeDestination =
address(this)` so that fee income is fully reflected in `delta_bps`.
### Worked example
Using `attacks[1]` from `evidence/red-team/2026-03-20.json`:
> **"Buy → Recenter → Sell (800 ETH round trip)"** — `delta_bps: 1179`
**Given:**
- `lm_eth_before` = 999,999,999,999,999,999,998 wei ≈ 1000 ETH
- Trade volume = 800 ETH (buy leg) + equivalent KRK sell leg
- Pool fee rate = 1% per swap
- `feeDestination = address(this)` (fees stay in LM)
**Step-by-step derivation:**
1. **Buy leg (800 ETH → KRK):** The 800 ETH buy pushes the price ~4000 ticks into
the concentrated positions. The pool charges 1% (≈8 ETH in fees accruing to
positions). Because liquidity is concentrated, the price moves far — the attacker
receives significantly fewer KRK than a constant-product AMM would give.
After the buy, position ETH principal increases (price moved up = more ETH value
in range).
2. **Recenter:** Positions are burned, collecting all accrued fees. New positions are
minted at the new (higher) price. The ~8 ETH in WETH fees plus the ETH-side
principal become redeployable liquidity.
3. **Sell leg (KRK → ETH):** The attacker sells all acquired KRK back through the
newly positioned liquidity. Another 1% fee applies. Because the attacker received
fewer KRK than 800 ETH worth (due to buy-leg slippage), the sell leg returns
significantly less than 800 ETH. The price drops back but the LM retains the
slippage differential.
4. **Result:** `lm_eth_after ≈ 1000 + 117.9 ≈ 1117.9 ETH`.
```
delta_bps = (1117.9 1000) / 1000 × 10_000 = 1179 bps
```
The ~117.9 ETH gain comes from: 1% fees on both legs (~16 ETH) **plus** ~102 ETH
in concentrated-liquidity slippage loss by the attacker. The slippage component
dominates because 800 ETH far exceeds the depth of the anchor/discovery positions,
pushing the trade through increasingly thin liquidity.
**Cross-check — why naive formula fails:**
```
naive = 800 × 0.01 × 2 / 1000 × 10_000 = 160 bps (actual: 1179 bps)
```
The naive estimate assumes uniform liquidity (constant slippage = fee rate only).
The 7× difference is entirely due to concentrated-liquidity slippage on a trade that
exceeds position depth.
---
## Schema: `evolution/YYYY-MM-DD.json`
Records one optimizer evolution run.
```json
{
"date": "YYYY-MM-DD",
"run_params": {
"generations": 50,
"population_size": 20,
"seed": 42,
"base_optimizer": "OptimizerV3"
},
"generation_stats": [
{
"generation": 1,
"best_fitness": -12.4,
"mean_fitness": -34.1,
"worst_fitness": -91.2
}
],
"best_fitness": -8.7,
"champion_file": "onchain/src/OptimizerV4.sol",
"champion_commit": "abc1234",
"verdict": "improved" | "no_improvement"
}
```
| Field | Type | Description |
|-------|------|-------------|
| `date` | string (ISO) | Date of the run |
| `run_params` | object | Input parameters used |
| `generation_stats` | array | Per-generation fitness summary |
| `best_fitness` | number | Best fitness score achieved (lower = better loss for LM) |
| `champion_file` | string | Repo-relative path to winning optimizer |
| `champion_commit` | string | Git commit SHA of the champion (if promoted) |
| `verdict` | string | `"improved"` or `"no_improvement"` |
---
## Schema: `red-team/YYYY-MM-DD.json`
Records one adversarial red-team run against a candidate optimizer.
```json
{
"date": "YYYY-MM-DD",
"candidate": "OptimizerV3",
"candidate_commit": "abc1234",
"optimizer_profile": "push3-default",
"lm_eth_before": 1000000000000000000000,
"lm_eth_after": 998500000000000000000,
"eth_extracted": 1500000000000000000,
"floor_held": false,
"methodology": "Each attack is snapshot-isolated: Anvil snapshot before, execute, measure, revert.",
"verdict": "floor_broken" | "floor_held",
"attacks": [
{
"strategy": "Flash buy + stake + recenter loop",
"pattern": "wrap → buy → stake → recenter_multi → sell",
"result": "DECREASED" | "HELD" | "INCREASED",
"delta_bps": -150,
"insight": "Rapid recenters pack ETH into floor while ratcheting it toward current price"
}
]
}
```
| Field | Type | Description |
|-------|------|-------------|
| `date` | string (ISO) | Date of the run |
| `candidate` | string | Optimizer under test |
| `candidate_commit` | string | Git commit SHA of the optimizer under test |
| `optimizer_profile` | string | Named profile / push3 variant |
| `lm_eth_before` | integer (wei) | LM total ETH at start |
| `lm_eth_after` | integer (wei) | LM total ETH at end |
| `eth_extracted` | integer (wei) | `lm_eth_before - lm_eth_after` (0 if floor held) |
| `floor_held` | boolean | `true` if no ETH was extracted |
| `methodology` | string | How the red-team run was conducted (e.g. snapshot-isolation procedure, measurement tool, revert strategy). Free-text; should be detailed enough to reproduce the run independently |
| `verdict` | string | `"floor_held"` or `"floor_broken"` |
| `attacks[].strategy` | string | Human-readable strategy name |
| `attacks[].pattern` | string | Abstract op sequence (e.g. `wrap → buy → stake`) |
| `attacks[].result` | string | `"DECREASED"`, `"HELD"`, or `"INCREASED"` |
| `attacks[].delta_bps` | integer | LM ETH change in basis points |
| `attacks[].insight` | string | Key finding from this strategy |
### Snapshot-Isolation Methodology
All red-team runs use **snapshot isolation** as the standard methodology. This
ensures that each attack is evaluated independently against the same initial
state, rather than against a cumulative balance modified by prior attacks.
**How it works:**
1. Before the first attack, the test runner records the initial `lm_eth_before`
value and takes an Anvil snapshot via the `anvil_snapshot` RPC method.
2. Each attack executes against this snapshot: run the attack, measure
`lm_eth_after`, compute `delta_bps`, then revert to the snapshot via
the `anvil_revert` RPC method.
3. The next attack begins from the exact same chain state as the previous one.
**Field semantics under snapshot isolation:**
| Field | Semantics |
|-------|-----------|
| `lm_eth_before` | LM total ETH at the shared initial snapshot — identical for every attack in the run |
| `lm_eth_after` | LM total ETH measured after this specific attack, before reverting |
| `attacks[].delta_bps` | Change relative to the shared `lm_eth_before`, not relative to any prior attack |
**Key implications:**
- `lm_eth_before` and `lm_eth_after` reflect **per-attack state**, not
cumulative historical balance. Each attack sees the same starting ETH.
- Attack results are independent and order-insensitive — reordering attacks does
not change any individual `delta_bps` value.
---
## Schema: `holdout/YYYY-MM-DD-prNNN.json`
Records a holdout quality gate evaluation for a specific PR.
```json
{
"date": "YYYY-MM-DD",
"pr": 123,
"candidate_commit": "abc1234",
"scenarios": [
{
"name": "bear_market_crash",
"passed": true,
"lm_eth_delta_bps": 12,
"notes": ""
},
{
"name": "flash_buy_exploit",
"passed": false,
"lm_eth_delta_bps": -340,
"notes": "Floor broken on 2000-trade run"
}
],
"scenarios_passed": 4,
"scenarios_total": 5,
"gate_passed": false,
"verdict": "pass" | "fail",
"blocking_scenarios": ["flash_buy_exploit"]
}
```
| Field | Type | Description |
|-------|------|-------------|
| `date` | string (ISO) | Date of evaluation |
| `pr` | integer | PR number being evaluated |
| `candidate_commit` | string | Commit SHA under test |
| `scenarios` | array | One entry per holdout scenario |
| `scenarios[].name` | string | Scenario identifier |
| `scenarios[].passed` | boolean | Whether LM ETH held or improved |
| `scenarios[].lm_eth_delta_bps` | integer | LM ETH change in basis points |
| `scenarios[].notes` | string | Free-text notes on failure mode |
| `scenarios_passed` | integer | Count of passing scenarios |
| `scenarios_total` | integer | Total scenarios run |
| `gate_passed` | boolean | `true` if all required scenarios passed |
| `verdict` | string | `"pass"` or `"fail"` |
| `blocking_scenarios` | array of strings | Scenario names that caused failure |
---
## Schema: `user-test/YYYY-MM-DD.json`
Records a UX evaluation run across simulated personas.
```json
{
"date": "YYYY-MM-DD",
"personas": [
{
"name": "crypto_native",
"task": "stake_and_set_tax_rate",
"completed": true,
"friction_points": [],
"screenshot_refs": ["tmp/screenshots/crypto_native_stake.png"],
"notes": ""
},
{
"name": "defi_newcomer",
"task": "first_buy_and_stake",
"completed": false,
"friction_points": ["Tax rate slider label unclear", "No confirmation of stake tx"],
"screenshot_refs": ["tmp/screenshots/defi_newcomer_confused.png"],
"notes": "User abandoned at tax rate step"
}
],
"personas_completed": 1,
"personas_total": 2,
"critical_friction_points": ["Tax rate slider label unclear"],
"verdict": "pass" | "fail"
}
```
| Field | Type | Description |
|-------|------|-------------|
| `date` | string (ISO) | Date of evaluation |
| `personas` | array | One entry per simulated persona |
| `personas[].name` | string | Persona identifier |
| `personas[].task` | string | Task the persona attempted |
| `personas[].completed` | boolean | Whether the task was completed |
| `personas[].friction_points` | array of strings | UX issues encountered |
| `personas[].screenshot_refs` | array of strings | Repo-relative paths to screenshots |
| `personas[].notes` | string | Free-text observations |
| `personas_completed` | integer | Count of personas who completed their task |
| `personas_total` | integer | Total personas evaluated |
| `critical_friction_points` | array of strings | Friction points that blocked task completion |
| `verdict` | string | `"pass"` if all personas completed, `"fail"` otherwise |
---
## Schema: `resources/YYYY-MM-DD.json`
Records one infrastructure resource snapshot.
```json
{
"date": "YYYY-MM-DD",
"disk": {
"used_bytes": 85899345920,
"total_bytes": 107374182400,
"used_pct": 80.0
},
"ram": {
"used_bytes": 3221225472,
"total_bytes": 8589934592,
"used_pct": 37.5
},
"api": {
"anthropic_calls_24h": 142,
"anthropic_budget_usd_used": 4.87,
"anthropic_budget_usd_limit": 50.0,
"anthropic_budget_pct": 9.7
},
"ci": {
"woodpecker_queue_depth": 2,
"woodpecker_running": 1
},
"staleness_threshold_days": 1,
"verdict": "ok" | "warn" | "critical"
}
```
| Field | Type | Description |
|-------|------|-------------|
| `date` | string (ISO) | Date of the snapshot |
| `disk.used_bytes` | integer | Bytes used on the primary volume |
| `disk.total_bytes` | integer | Total bytes on the primary volume |
| `disk.used_pct` | number | Percentage of disk used |
| `ram.used_bytes` | integer | Bytes of RAM in use |
| `ram.total_bytes` | integer | Total bytes of RAM |
| `ram.used_pct` | number | Percentage of RAM used |
| `api.anthropic_calls_24h` | integer | Anthropic API calls in the past 24 hours |
| `api.anthropic_budget_usd_used` | number | USD spent against the Anthropic budget |
| `api.anthropic_budget_usd_limit` | number | Configured Anthropic budget ceiling in USD |
| `api.anthropic_budget_pct` | number | Percentage of budget consumed |
| `ci.woodpecker_queue_depth` | integer | Number of jobs waiting in the Woodpecker CI queue |
| `ci.woodpecker_running` | integer | Number of Woodpecker jobs currently running |
| `staleness_threshold_days` | integer | Maximum age in days before this record is considered stale (always 1) |
| `verdict` | string | `"ok"` (all metrics normal), `"warn"` (≥80% on any dimension), or `"critical"` (≥95% on any dimension) |
---
## Schema: `protocol/YYYY-MM-DD.json`
Records one on-chain protocol health snapshot.
```json
{
"date": "YYYY-MM-DD",
"block_number": 24500000,
"tvl_eth": "1234567890000000000000",
"tvl_eth_formatted": "1234.57",
"accumulated_fees_eth": "12345678900000000",
"accumulated_fees_eth_formatted": "0.012",
"position_count": 3,
"positions": [
{
"name": "floor",
"tick_lower": -887272,
"tick_upper": -200000,
"liquidity": "987654321000000000"
},
{
"name": "anchor",
"tick_lower": -200000,
"tick_upper": 0
},
{
"name": "discovery",
"tick_lower": 0,
"tick_upper": 887272
}
],
"rebalance_count_24h": 4,
"last_rebalance_block": 24499800,
"staleness_threshold_days": 1,
"verdict": "healthy" | "degraded" | "offline"
}
```
| Field | Type | Description |
|-------|------|-------------|
| `date` | string (ISO) | Date of the snapshot |
| `block_number` | integer | Block number at time of snapshot |
| `tvl_eth` | string (wei) | Total value locked across all LM positions in wei |
| `tvl_eth_formatted` | string | TVL formatted in ETH (2 dp) |
| `accumulated_fees_eth` | string (wei) | Fees accumulated by the LiquidityManager in wei |
| `accumulated_fees_eth_formatted` | string | Fees formatted in ETH (3 dp) |
| `position_count` | integer | Number of active Uniswap V3 positions (expected: 3) |
| `positions` | array | One entry per active position |
| `positions[].name` | string | Position label: `"floor"`, `"anchor"`, or `"discovery"` |
| `positions[].tick_lower` | integer | Lower tick boundary |
| `positions[].tick_upper` | integer | Upper tick boundary |
| `positions[].liquidity` | string | Liquidity amount in the position (wei-scale integer) |
| `rebalance_count_24h` | integer | Number of `recenter()` calls in the past 24 hours |
| `last_rebalance_block` | integer | Block number of the most recent `recenter()` call |
| `staleness_threshold_days` | integer | Maximum age in days before this record is considered stale (always 1) |
| `verdict` | string | `"healthy"` (positions active, TVL > 0), `"degraded"` (position_count < 3 or rebalance stalled), or `"offline"` (TVL = 0 or contract unreachable) |

View file

@ -1,36 +0,0 @@
{
"date": "2026-03-22",
"issue": 517,
"title": "Adversary parasitic LP extracts 29% from holder — all recenters fail",
"scenario": "staker-vs-holder",
"status": "fixed",
"root_cause": {
"summary": "PRICE_STABILITY_INTERVAL (300s) too long relative to MIN_RECENTER_INTERVAL (60s)",
"detail": "After a large trade moving the tick >1000 positions, the 5-minute TWAP average lagged behind the current price by hundreds of ticks, far exceeding MAX_TICK_DEVIATION (50). Recenter reverted with 'price deviated from oracle' for ~285s after each trade, creating a window where the LM could not reposition. The adversary's parasitic LP captured fees during this unprotected window.",
"revert_reasons": {
"after_adversary_setup": "price deviated from oracle",
"after_holder_buy": "price deviated from oracle",
"after_adversary_attack": "price deviated from oracle",
"after_holder_sell": "amplitude not reached"
},
"johba_comment_confirmed": "Parasitic LP does not directly block recentering (V3 positions are independent). The revert is from the TWAP stability check, not from position interference."
},
"fix": {
"file": "onchain/src/abstracts/PriceOracle.sol",
"change": "PRICE_STABILITY_INTERVAL reduced from 300 to 30 seconds",
"rationale": "30s still prevents same-block manipulation (Ethereum mainnet ~12s block time) while ensuring TWAP converges well within the 60s cooldown. After the fix, recenter succeeds within 61s of any trade.",
"security_impact": "Manipulation window reduced from 5 min to 30s. Attacker must hold manipulated price for 30+ seconds (2.5 blocks) before recenter accepts it. Combined with 60s cooldown, total manipulation window is <60s."
},
"tests_added": [
"testRecenterAfterLargeBuy_TWAPConverges — verifies recenter works after 5 ETH buy + 61s wait",
"testRecenterRejectsSameBlockManipulation — verifies TWAP check still blocks <30s manipulation",
"testAdversarialLP_HolderProtected — full parasitic LP scenario, holder loss < 5%"
],
"test_results": {
"total": 256,
"passed": 255,
"failed": 1,
"skipped": 0,
"pre_existing_failure": "FitnessEvaluator.t.sol::testBatchEvaluate (requires FITNESS_MANIFEST_DIR env var)"
}
}

View file

@ -1,80 +0,0 @@
{
"date": "2026-03-20",
"candidate": "Optimizer",
"optimizer_profile": "default",
"candidate_commit": "a1efa5942dd7ca863d069929ff0ca9b1909a1237",
"lm_eth_before": "999999999999999999998",
"lm_eth_after": "999999999999999999998",
"eth_extracted": 0,
"floor_held": true,
"verdict": "floor_held",
"strategies_tested": 7,
"strategies_total": 9,
"agent_runs": 2,
"methodology": "Each attack is snapshot-isolated: Anvil snapshot before, execute strategy, measure LM total ETH via LmTotalEth.s.sol, revert to snapshot. Per-attack delta_bps reflects the isolated measurement. Top-level lm_eth_after equals lm_eth_before because all attacks were individually reverted to the clean baseline.",
"attacks": [
{
"strategy": "Buy → Recenter → Sell (200 ETH round trip)",
"pattern": "buy → recenter → sell",
"result": "INCREASED",
"delta_bps": 24,
"insight": "The 1% Uniswap V3 pool fee is the primary defense. 200 ETH round trip generates ~2.4 ETH in fees for the LM. Fee income far exceeds any IL from repositioning."
},
{
"strategy": "Buy → Recenter → Sell (800 ETH round trip)",
"pattern": "buy → recenter → sell",
"result": "INCREASED",
"delta_bps": 1179,
"insight": "800 ETH buy moves price ~4000 ticks into concentrated positions, causing massive slippage. The attacker receives far fewer KRK per ETH as the trade moves through increasingly thin liquidity. Combined 1% pool fees and adverse slippage on both legs result in ~118 ETH net transfer to LM. Floor position (~75% of LM ETH in 200 ticks) absorbs the sell leg."
},
{
"strategy": "Multi-cycle buy → recenter (3×500 ETH) → sell all",
"pattern": "buy → recenter_multi → sell",
"result": "INCREASED",
"delta_bps": 465,
"insight": "Multiple buy-recenter cycles compound fee income. 1500 ETH total volume generated ~46.5 ETH in fees + slippage. Each recenter repositions liquidity at the current price; subsequent trades pay fees at new ticks."
},
{
"strategy": "Extreme Buy (2050 ETH) → Recenter at Deep Tick → Sell All",
"pattern": "buy → recenter → sell",
"result": "INCREASED",
"delta_bps": 3746,
"insight": "2050 ETH far exceeds pool depth (~1000 ETH in positions), causing extreme slippage on both legs. The attacker loses ~374 ETH (~18% of input) — mostly to slippage through thin liquidity beyond the concentrated positions, not just the 1% fee. The LM captures all of this as position value increase. Demonstrates that over-sized trades are self-defeating."
},
{
"strategy": "Stake to change optimizer params → exploit repositioning",
"pattern": "buy → stake → recenter",
"result": "INCREASED",
"delta_bps": 500,
"insight": "Staking parameter changes do not create exploitable repositioning windows. The +500 bps is from the buy-leg fee + slippage (50 ETH buy). Staking itself has no effect on LM ETH."
},
{
"strategy": "Exploit discovery position WETH consumption + asymmetric repositioning",
"pattern": "buy → recenter → sell",
"result": "INCREASED",
"delta_bps": 1179,
"insight": "Discovery position WETH consumption does not weaken the floor enough to enable extraction. Tested as 800 ETH round trip variant. 1% fee + slippage dominates all round-trip strategies. Subsumed by attack 2 (same pattern at same volume)."
},
{
"strategy": "One-way sell — buy KRK, recenter, sell at stale positions (no second recenter)",
"pattern": "buy → recenter → sell",
"result": "INCREASED",
"delta_bps": 24,
"insight": "Even without follow-up recenter, LM gained ETH. The cost of acquiring KRK (buy-leg fees + slippage) exceeds what can be extracted by selling through stale positions. Tested at 200 ETH. Subsumed by attack 1 (same effective pattern)."
},
{
"strategy": "Send KRK Directly to LM + Recenter (Supply Manipulation)",
"pattern": "buy → transfer → recenter",
"result": "INCREASED",
"delta_bps": 1000,
"insight": "Sending KRK to LM acts as a donation — reduces outstandingSupply and gives LM free KRK. Combined with 100 ETH buy-leg fees + slippage (~100 ETH total LM gain). Floor calculation handles reduced supply gracefully."
},
{
"strategy": "Floor Ratchet Extraction — initial phase only (buy → recenter_multi → sell through floor)",
"pattern": "buy → recenter_multi → sell",
"result": "INCREASED",
"delta_bps": 1179,
"insight": "Tests the initial phase of the known floor ratchet vector (#630). 800 ETH buy crashes price ~4000 ticks; only 1 of 10 recenters succeeds (TWAP oracle blocks the rest). Sell through floor fully absorbed. Net: LM gains ~118 ETH. IMPORTANT: this does NOT test the full 2000-trade oscillation variant that produced profitable outcomes (9/34 runs, up to +178 ETH extracted). That variant gradually drifts TWAP to bypass oracle protections. A dedicated full-sequence run is tracked as follow-up (#1082)."
}
]
}

View file

@ -1,24 +0,0 @@
{
"date": "2026-03-23",
"candidate": "Optimizer",
"optimizer_profile": "default",
"candidate_commit": "144d6a2",
"lm_eth_before": "999999999999999999998",
"lm_eth_after": "999999999999999999998",
"eth_extracted": 0,
"floor_held": true,
"verdict": "floor_held",
"strategies_tested": 1,
"strategies_total": 1,
"agent_runs": 0,
"methodology": "Full 2000-trade floor ratchet oscillation executed via AttackRunner.s.sol forge simulation (not broadcast — forge broadcast incompatible with try/catch recenter reverts). Attack file: onchain/script/backtesting/attacks/floor-ratchet-oscillation.jsonl. 10 oscillation rounds × 200 buy→recenter cycles (5 ETH per buy), with alternating stake/unstake/sell phases at tax rates 0 and 5. TWAP oracle protection (30s stability window, ±50 tick deviation) blocked 2019 of 2022 recenter attempts. Only 3 recenters succeeded — insufficient to drift positions. LM TVL increased from 9.61e21 to 10.79e21 wei (TVL metric including KRK→ETH conversion). Top-level lm_eth_before/lm_eth_after are snapshot-isolated measurements from LmTotalEth.s.sol (ETH-only metric, excludes KRK). The floor ratchet oscillation vector from #630 is defeated by the TWAP oracle + amplitude threshold + 1% pool fee defenses.",
"attacks": [
{
"strategy": "Floor Ratchet Oscillation — full 2000-trade buy → stake → recenter loop with TWAP drift",
"pattern": "buy → stake → recenter_multi → sell",
"result": "INCREASED",
"delta_bps": 1230,
"insight": "The 2000-trade oscillation variant from #630 is fully defeated. TWAP oracle stability check (±50 tick, 30s window) blocks 99.9% of recenter attempts after buy-driven price moves. The few recenters that succeed do not produce enough repositioning to enable extraction. The 1% Uniswap V3 pool fee on each of the 2000 buy legs (5 ETH × 2000 = 10,000 ETH volume) generates substantial fee income for the LM. Combined with concentrated liquidity slippage on the sell legs, the adversary loses ~12% of capital. The floor ratchet risk flagged in #630 (r=+0.890, 9/34 profitable) does not manifest against the current TWAP-protected Optimizer."
}
]
}

View file

@ -1,18 +0,0 @@
{
"date": "2026-03-26",
"candidate": "OptimizerV3",
"optimizer_profile": "push3-default",
"candidate_commit": "a76d393",
"lm_eth_before": "1000000000000000000000",
"lm_eth_after": "1399000000000000000000",
"eth_extracted": 0,
"floor_held": true,
"verdict": "floor_held",
"strategies_tested": 7,
"duration_seconds": 1440,
"methodology": "bootstrap-light + adversarial Claude agent, 7 diverse strategies",
"attacks": [],
"summary": "Floor held under all 7 adversarial strategies. LM ETH increased from ~1000 to ~1399. Attacker lost ETH to fees and slippage. No extraction vector found.",
"exit_code": 0,
"notes": "Original session crashed due to Claude auto-update mid-run (24 min in). Evidence reconstructed from session diagnostics. Raw per-attack data lost with worktree cleanup — attacks[] cannot be populated retroactively. Schema fields corrected by supervisor after review found violations in the merged file (profile→optimizer_profile, result→verdict, ETH values→wei, added candidate_commit/eth_extracted/attacks)."
}

View file

@ -1,82 +0,0 @@
{
"date": "2026-03-27",
"candidate": "OptimizerV3",
"candidate_commit": "b161faaee239cf0435ec9e436ad1af217c394a13",
"optimizer_profile": "push3-default",
"lm_eth_before": 999999999999999999998,
"lm_eth_after": 999999999999999999998,
"eth_extracted": 0,
"floor_held": true,
"verdict": "floor_held",
"strategies_tested": 7,
"duration_seconds": 2519,
"methodology": "bootstrap-light + adversarial Claude agent (claude -p --dangerously-skip-permissions), 7 strategies with snapshot-revert isolation. Raw session data from stream-json output.",
"attacks": [
{
"strategy": "Buy→Recenter→Sell (Classic IL Crystallization)",
"pattern": "buy → recenter → sell",
"outcome": "HELD",
"eth_extracted": 0,
"floor_held_for_attack": true,
"delta_bps": 24,
"insight": "The 1% swap fee on both legs (~4 ETH total) exceeds the IL from repositioning a single anchor traversal. With AW=50 (anchorSpacing=3600 ticks), the anchor is wide and IL per tick is small. Fee income dominates decisively."
},
{
"strategy": "Parasitic LP + Fee Siphoning",
"pattern": "buy → add_lp → buy → recenter → sell → recenter",
"outcome": "HELD",
"eth_extracted": 0,
"floor_held_for_attack": true,
"delta_bps": 1740,
"insight": "Parasitic LP captures some fees from swaps but doesn't extract ETH from LM. The massive buy (600 ETH total) put 600 ETH INTO the pool, and the LM captured ~6 ETH in fees. The sell couldn't push through the floor position (massive liquidity at [127400,127600])."
},
{
"strategy": "Extreme Buy→Recenter→Sell (Maximum Price Push)",
"pattern": "buy → recenter → sell",
"outcome": "HELD",
"eth_extracted": 0,
"floor_held_for_attack": true,
"delta_bps": 7338,
"insight": "1500 ETH buy pushed through anchor AND into discovery. After recenter, the floor at [122800,123000] with 75% of ETH created an impenetrable wall. With 103e24 KRK unsellable, the adversary lost ~734 ETH permanently."
},
{
"strategy": "Multi-Cycle Small Ratchet",
"pattern": "buy → recenter_multi → sell → recenter_multi",
"outcome": "HELD",
"eth_extracted": 0,
"floor_held_for_attack": true,
"delta_bps": 37,
"insight": "Multiple small cycles don't compound IL faster than fee income. Each buy adds ~0.5 ETH in fees to LM (1% of 50 ETH). The floor position consistently blocks sell pressure. The 1% fee acts as a friction ratchet that always benefits the LM."
},
{
"strategy": "Staking Manipulation + Optimizer Shift",
"pattern": "buy → stake → recenter → sell",
"outcome": "HELD",
"eth_extracted": 0,
"floor_held_for_attack": true,
"delta_bps": 30783,
"insight": "Buying 3520 ETH for staking deposited massive ETH into the LM. Optimizer shift created tight anchor (AW=11, ~175 ETH) easy to push through, but floor (95% of ETH, 200 ticks wide, liq=2.04e26) was impenetrable. Fatal flaw: KRK needed for >91% staking can only come from the pool, depositing massive ETH."
},
{
"strategy": "Large buy → recenter → large sell (IL crystallization)",
"pattern": "buy → recenter_multi → sell",
"outcome": "HELD",
"eth_extracted": 0,
"floor_held_for_attack": true,
"delta_bps": 0,
"insight": "Early iteration of Strategy 1. Subsumed by the classic IL crystallization attempt."
},
{
"strategy": "Multi-cycle IL ratchet with parasitic LP",
"pattern": "buy → add_lp → sell → recenter_multi",
"outcome": "HELD",
"eth_extracted": 0,
"floor_held_for_attack": true,
"delta_bps": 0,
"insight": "Early iteration of parasitic LP approach. KRK sell failed due to insufficient liquidity to push through floor. Subsumed by revised parasitic LP strategy."
}
],
"attack_suite_count": 7,
"summary": "Floor held under all 7 adversarial strategies. All reverted to clean baseline — no extraction vector found. The 1% fee moat, floor position defense (75-95% of LM ETH in 200 ticks), ETH-neutral recenter, directional VWAP defense, and the chicken-and-egg problem (KRK acquisition requires ETH deposit) provide layered defense.",
"exit_code": 0
}

View file

@ -1,96 +0,0 @@
{
"date": "2026-03-25",
"candidate_commit": "491755592a86b34f7761347cd8cc299652b02942",
"methodology": "Playwright headless chromium (1280x720) against local full stack (anvil + webapp + ponder + caddy). Each persona spec runs sequentially with chain state reset between runs via evm_snapshot/evm_revert.",
"personas": [
{
"name": "tyler",
"task": "passive-holder funnel: land → connect wallet → buy KRK → hold",
"completed": false,
"friction_points": [
"Wallet connector panel (.connectors-element) not visible after clicking mobile login icon — timeout at 10s",
"Desktop connect button not found at 1280x720 viewport — fell through to mobile fallback path"
],
"screenshot_refs": [
"test-results/usertest/tyler/tyler-landing-page-2026-03-25T07-35-11-729Z.png"
],
"notes": "Tyler skipped docs and went straight to connect wallet. Observed: 'Cool looking app! Let's goooo'. Copy feedback: 'Needs bigger BUY NOW button on landing page'. Blocked at wallet connection step."
},
{
"name": "alex",
"task": "passive-holder funnel: land → understand DeFi → connect wallet → buy KRK",
"completed": false,
"friction_points": [
"No 'New to DeFi?' or tutorial section on landing page for newcomers",
"No trust signals (Audited, Secure, Non-custodial badges) to reassure first-time users",
"Wallet connector panel (.connectors-element) not visible — timeout at 10s",
"Wallet connection errors lack beginner-friendly explanations"
],
"screenshot_refs": [
"test-results/usertest/alex/alex-landing-page-2026-03-25T07-33-50-415Z.png",
"test-results/usertest/alex/alex-looking-for-help-2026-03-25T07-33-51-890Z.png"
],
"notes": "Alex spent 7s on landing page looking for help/tutorials. Observed: 'This looks professional but I have no idea what I'm looking at'. Tokenomics question: 'What is staking? How do I make money from this?'. Gave up after wallet connection failed."
},
{
"name": "sarah",
"task": "passive-holder funnel: land → research → connect wallet → evaluate yield",
"completed": false,
"friction_points": [
"Landing page does not explain 'What is Harberger tax?' in simple terms",
"No About, Docs, or Team page found before wallet connection",
"Wallet connector panel (.connectors-element) not visible — timeout at 10s"
],
"screenshot_refs": [
"test-results/usertest/sarah/sarah-landing-page-2026-03-25T07-34-52-636Z.png",
"test-results/usertest/sarah/sarah-looking-for-info-2026-03-25T07-34-53-821Z.png"
],
"notes": "Sarah read the landing page carefully before connecting. Observed: 'Reading landing page carefully before connecting wallet', 'Looking for About, Docs, or Team page before doing anything else'. Blocked at wallet connection."
},
{
"name": "priya",
"task": "staker funnel: land → analyze mechanism design → connect wallet → evaluate staking",
"completed": false,
"friction_points": [
"No whitepaper, technical appendix, or formal specification found from app UI",
"No governance structure, DAO participation, or admin key disclosures visible",
"Wallet connector panel (.connectors-element) not visible — timeout at 10s"
],
"screenshot_refs": [
"test-results/usertest/priya/priya-landing-page-2026-03-25T07-34-31-771Z.png",
"test-results/usertest/priya/priya-searching-for-docs-2026-03-25T07-34-33-347Z.png"
],
"notes": "Priya found audit link but wanted full report. Tokenomics questions: 'What is the theoretical Nash equilibrium for tax rates?', 'What are the centralization risks? Who holds admin keys? Is there a timelock?'. Blocked at wallet connection."
},
{
"name": "marcus",
"task": "staker funnel: land → probe for exploits → connect wallet → test edge cases",
"completed": false,
"friction_points": [
"No 'Audited by X' badge prominently displayed on landing page",
"Wallet connector panel (.connectors-element) not visible — timeout at 10s"
],
"screenshot_refs": [
"test-results/usertest/marcus/marcus-landing-page-2026-03-25T07-34-13-018Z.png"
],
"notes": "Marcus immediately skeptical — 'what's the catch?'. Copy feedback: 'Landing page needs Audited by X badge prominently displayed'. Tokenomics question: 'What prevents someone from flash-loaning to manipulate VWAP?'. Blocked at wallet connection."
}
],
"personas_completed": 0,
"personas_total": 5,
"critical_friction_points": [
"Wallet connector panel (.connectors-element) not rendering after clicking connect button at 1280x720 viewport — all 5 personas blocked",
"Desktop connect button (.connect-button--disconnected) not visible at 1280x720 — tests fall through to mobile login icon path which also fails",
"No onboarding/tutorial content for DeFi newcomers (alex, sarah)",
"No prominent audit badge or trust signals (marcus, alex)",
"No whitepaper or formal mechanism specification accessible from UI (priya)"
],
"verdict": "fail",
"raw_reports": {
"tyler": "tmp/usertest-results/tyler-bags-morrison.json",
"alex": "tmp/usertest-results/alex-rivera.json",
"sarah": "tmp/usertest-results/sarah-park.json",
"priya": "tmp/usertest-results/dr-priya-malhotra.json",
"marcus": "tmp/usertest-results/marcus-flash-chen.json"
}
}

View file

@ -1,163 +0,0 @@
{
"date": "2026-03-26",
"candidate_commit": "9135b8696eb791d131ccd45ec06d3a9ce137f1e5",
"context": "Post-wallet-fix verification run. PR #1160 (merged 2026-03-25) fixed test wallet provider: eth_accounts and getProviderState now return empty arrays when not connected, preventing wagmi auto-connect that was hiding the connector panel.",
"methodology": "Playwright headless chromium (1280x720) against local full stack (anvil + postgres + ponder + webapp + caddy). Each persona spec runs sequentially with chain state reset between runs via evm_snapshot/evm_revert. Test timeout set to 120s.",
"personas": [
{
"name": "tyler",
"display": "Tyler 'Bags' Morrison",
"funnel": "passive-holder",
"task": "passive-holder funnel: land → connect wallet → buy KRK → stake → hold",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 10 ETH", "Buy KRK with 4.0 ETH total"],
"actions_failed": ["Stake 50 KRK at 5% tax"],
"friction_points": [
"Staking failed: /stakestake navigation bug — attemptStake helper constructs URL by appending 'stake' to current base URL, producing invalid route",
"No buy button visible on main page — had to navigate to Cheats page",
"Tax rate concept confusing — 'Am I PAYING tax or EARNING tax?'",
"No Discord or community links visible"
],
"screenshot_refs": [
"test-results/usertest/tyler/tyler-landing-page-2026-03-26T07-41-46-965Z.png",
"test-results/usertest/tyler/tyler-wallet-connected-2026-03-26T07-41-49-679Z.png",
"test-results/usertest/tyler/tyler-bought-krk-2026-03-26T07-41-57-952Z.png",
"test-results/usertest/tyler/tyler-stake-failed-2026-03-26T07-42-24-494Z.png"
],
"notes": "Wallet connection worked immediately via desktop button. Tyler completed buy flow successfully. Staking failed due to navigation bug (not wallet-related). Test passed."
},
{
"name": "alex",
"display": "Alex Rivera",
"funnel": "passive-holder",
"task": "passive-holder funnel: land → understand DeFi → connect wallet → buy KRK → stake",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet (first time)", "Mint 5 ETH (following guide)", "Buy KRK with 0.05 ETH (minimal test)"],
"actions_failed": ["Stake 25 KRK at 15% tax"],
"friction_points": [
"No 'New to DeFi?' or tutorial section for newcomers",
"No trust signals (Audited, Secure, Non-custodial badges)",
"Staking failed: /stakestake navigation bug",
"DeFi jargon overwhelming: VWAP, tax rate, snatching, claimed slots"
],
"screenshot_refs": [
"test-results/usertest/alex/alex-landing-page-2026-03-26T07-40-33-088Z.png",
"test-results/usertest/alex/alex-wallet-connected-2026-03-26T07-40-37-908Z.png",
"test-results/usertest/alex/alex-small-purchase-2026-03-26T07-40-53-288Z.png",
"test-results/usertest/alex/alex-stake-failed-2026-03-26T07-41-15-940Z.png"
],
"notes": "Wallet connection worked first try via desktop button. Purchase flow smooth. Snatching concept 'TERRIFYING for newcomers'. Test passed."
},
{
"name": "sarah",
"display": "Sarah Park",
"funnel": "passive-holder",
"task": "passive-holder funnel: land → research → connect wallet → evaluate yield → stake",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 20 ETH", "Buy KRK with 0.05 ETH (test)", "Buy KRK with 3.0 ETH total"],
"actions_failed": ["Stake 50 KRK at 15% tax"],
"friction_points": [
"Landing page does not explain 'What is Harberger tax?' in simple terms",
"No audit badge visible",
"Staking failed: /stakestake navigation bug",
"No return calculator for estimated APY at different tax rates"
],
"screenshot_refs": [
"test-results/usertest/sarah/sarah-landing-page-2026-03-26T07-44-58-497Z.png",
"test-results/usertest/sarah/sarah-wallet-connected-2026-03-26T07-45-01-350Z.png",
"test-results/usertest/sarah/sarah-test-purchase-complete-2026-03-26T07-45-15-223Z.png",
"test-results/usertest/sarah/sarah-stake-error-2026-03-26T07-45-44-256Z.png"
],
"notes": "Wallet connection worked via desktop button. Both test and main purchase succeeded. Compares unfavorably to Aave's simplicity. Test passed."
},
{
"name": "priya",
"display": "Dr. Priya Malhotra",
"funnel": "staker",
"task": "staker funnel: land → analyze mechanism design → connect wallet → evaluate staking",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 100 ETH", "Buy KRK with 5.0 ETH (institutional test)"],
"actions_failed": ["Stake 500 KRK at 12% tax"],
"friction_points": [
"No whitepaper, technical appendix, or formal specification accessible from UI",
"No governance structure, DAO participation, or admin key disclosures visible",
"Staking failed: /stakestake navigation bug",
"Insufficient liquidity depth for institutional positions (>$100k)"
],
"screenshot_refs": [
"test-results/usertest/priya/priya-landing-page-2026-03-26T07-44-02-828Z.png",
"test-results/usertest/priya/priya-wallet-connected-2026-03-26T07-44-05-677Z.png",
"test-results/usertest/priya/priya-large-swap-complete-2026-03-26T07-44-19-566Z.png",
"test-results/usertest/priya/priya-final-analysis-2026-03-26T07-44-52-056Z.png"
],
"notes": "Wallet connection worked via desktop button. Institutional-size swap completed. Would allocate $50-100k for observation. Test passed."
},
{
"name": "marcus",
"display": "Marcus 'Flash' Chen",
"funnel": "staker",
"task": "staker funnel: land → probe for exploits → connect wallet → test edge cases",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 50 ETH", "Buy KRK with 0.01 ETH (test)", "Buy KRK with 5 ETH"],
"actions_failed": ["Stake 100 KRK at 5% tax"],
"friction_points": [
"No 'Audited by X' badge prominently displayed",
"Staking failed: /stakestake navigation bug",
"No snatching ROI calculator or profitability tool",
"Contract addresses not easily visible for verification"
],
"screenshot_refs": [
"test-results/usertest/marcus/marcus-landing-page-2026-03-26T07-42-55-063Z.png",
"test-results/usertest/marcus/marcus-wallet-connected-2026-03-26T07-42-56-829Z.png",
"test-results/usertest/marcus/marcus-large-swap-complete-2026-03-26T07-43-13-650Z.png",
"test-results/usertest/marcus/marcus-final-dashboard-2026-03-26T07-43-57-381Z.png"
],
"notes": "Wallet connection worked via desktop button. Both small and large swaps completed. Intrigued by snatching PvP mechanics. Test passed."
}
],
"personas_completed": 5,
"personas_total": 5,
"wallet_connections_succeeded": 5,
"wallet_connections_total": 5,
"fix_verification": {
"pr": "#1160",
"fix_description": "Test wallet provider eth_accounts and getProviderState now return empty arrays when not connected, preventing wagmi auto-connect",
"previous_result": "0/5 personas completing — all blocked at wallet connector panel not rendering",
"current_result": "5/5 personas completing — all wallet connections succeeded via desktop Connect button",
"fix_status": "verified_working"
},
"new_issue_discovered": {
"description": "attemptStake helper navigates to /stakestake (invalid route) instead of /stake — Vue Router warns 'No match found for location with path /stakestake'",
"root_cause": "helpers.ts attemptStake() appends 'stake' to current page.url().split('#')[0] base URL which already ends in /stake, producing /stakestake",
"impact": "All 5 personas fail staking step (non-blocking — tests complete gracefully)",
"severity": "medium"
},
"critical_friction_points": [
"Staking navigation bug: /stakestake invalid route blocks all stake attempts (test infrastructure issue, not wallet-related)",
"No onboarding/tutorial content for DeFi newcomers (alex, sarah)",
"No prominent audit badge or trust signals (marcus, alex, sarah)",
"No whitepaper or formal mechanism specification accessible from UI (priya)",
"Tax rate concept confusing without guidance (tyler, alex, sarah)",
"Snatching concept frightening without explanation (tyler, alex, sarah)"
],
"verdict": "pass",
"verdict_detail": "Wallet connector fix (PR #1160) fully verified — 5/5 personas now connect successfully (previously 0/5). All personas complete their test journeys including wallet connection, ETH minting, and KRK purchase. Staking step fails for all due to a separate navigation bug (/stakestake URL), which is a test infrastructure issue not related to the wallet connector fix.",
"comparison": {
"previous_date": "2026-03-25",
"previous_completed": 0,
"current_completed": 5,
"improvement": "0/5 → 5/5 (wallet connector fix resolved the blocking issue)"
},
"raw_reports": {
"tyler": "tmp/usertest-results/tyler-bags-morrison.json",
"alex": "tmp/usertest-results/alex-rivera.json",
"sarah": "tmp/usertest-results/sarah-park.json",
"priya": "tmp/usertest-results/dr-priya-malhotra.json",
"marcus": "tmp/usertest-results/marcus-flash-chen.json"
}
}

View file

@ -1,172 +0,0 @@
{
"date": "2026-03-27",
"candidate_commit": "f96ca9ddb4f7cd7210ca47dbac755404ee93cdbe",
"context": "Post-attemptStake-fix verification run. PR #1171 (merged 2026-03-26) fixed the /stakestake navigation bug in attemptStake helper: now uses new URL(page.url()).origin + '/stake' instead of appending 'stake' to the current URL. This run verifies that staking navigation works correctly after the fix.",
"methodology": "Playwright headless chromium (1280x720) against local full stack (anvil + postgres + ponder + webapp + caddy). Each persona spec runs sequentially with chain state reset between runs via evm_snapshot/evm_revert. Test timeout set to 120s. Stack started with anvil port override (18545 host) due to port conflict.",
"personas": [
{
"name": "tyler",
"display": "Tyler 'Bags' Morrison",
"funnel": "passive-holder",
"task": "passive-holder funnel: land -> connect wallet -> buy KRK -> stake -> hold",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 10 ETH"],
"actions_failed": ["Buy KRK (deployer balance exceeded)", "Stake 50 KRK at 5% tax (stake form timeout: slider not visible within 15s, 504 Gateway Timeout on ponder API)"],
"friction_points": [
"Buy failed: deployer KRK balance exceeded after chain state resets",
"Stake form did not fully load (504 Gateway Timeout from ponder during page load)",
"No buy button visible on main page - had to navigate to Cheats page",
"Tax rate concept confusing - 'Am I PAYING tax or EARNING tax?'",
"No Discord or community links visible"
],
"screenshot_refs": [
"test-results/usertest/tyler/tyler-landing-page-2026-03-27T08-37-28-808Z.png",
"test-results/usertest/tyler/tyler-wallet-connected-2026-03-27T08-37-34-925Z.png",
"test-results/usertest/tyler/tyler-buy-error-2026-03-27T08-37-39-818Z.png",
"test-results/usertest/tyler/tyler-stake-page-2026-03-27T08-37-49-765Z.png",
"test-results/usertest/tyler/tyler-stake-failed-2026-03-27T08-38-08-065Z.png"
],
"notes": "Wallet connection succeeded. Navigate to /stake worked correctly (fix verified - no /stakestake). Stake form failed to load due to 504 Gateway Timeout on ponder API, not navigation. Test PASSED (non-blocking failure)."
},
{
"name": "alex",
"display": "Alex Rivera",
"funnel": "passive-holder",
"task": "passive-holder funnel: land -> understand DeFi -> connect wallet -> buy KRK -> stake",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet (first time)", "Mint 5 ETH"],
"actions_failed": ["Buy KRK (JsonRpcProvider failed to detect network)", "Stake 25 KRK at 15% tax (stake form timeout: slider not visible within 15s, 504 Gateway Timeout)"],
"friction_points": [
"No 'New to DeFi?' or tutorial section for newcomers",
"No trust signals (Audited, Secure, Non-custodial badges)",
"Stake form did not load (504 Gateway Timeout from ponder)",
"DeFi jargon overwhelming: VWAP, tax rate, snatching, claimed slots"
],
"screenshot_refs": [
"test-results/usertest/alex/alex-landing-page-2026-03-27T08-38-46-172Z.png",
"test-results/usertest/alex/alex-wallet-connected-2026-03-27T08-38-51-277Z.png",
"test-results/usertest/alex/alex-stake-failed-2026-03-27T08-39-24-854Z.png"
],
"notes": "Wallet connection worked first try. Navigate to /stake worked correctly (fix verified - no /stakestake). Stake form failed to load due to 504 Gateway Timeout. Buy failed due to RPC network detection issues after chain revert. Test PASSED."
},
{
"name": "sarah",
"display": "Sarah Park",
"funnel": "passive-holder",
"task": "passive-holder funnel: land -> research -> connect wallet -> evaluate yield -> stake",
"completed": false,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 20 ETH"],
"actions_failed": ["Buy KRK (contract balanceOf returned empty data after chain revert)"],
"friction_points": [
"Landing page does not explain 'What is Harberger tax?' in simple terms",
"No audit badge visible",
"Test crashed on buyKrk: balanceOf returned 0x after chain snapshot/revert cycle"
],
"screenshot_refs": [
"test-results/usertest/sarah/sarah-landing-page-2026-03-27T08-40-05-846Z.png",
"test-results/usertest/sarah/sarah-wallet-connected-2026-03-27T08-40-14-067Z.png"
],
"notes": "Wallet connection succeeded. Test FAILED due to contract state issue after chain revert (balanceOf returned empty data). This is a test infrastructure issue with evm_snapshot/evm_revert, not related to the stake navigation fix. Did not reach stake step."
},
{
"name": "priya",
"display": "Dr. Priya Malhotra",
"funnel": "staker",
"task": "staker funnel: land -> analyze mechanism design -> connect wallet -> evaluate staking",
"completed": true,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 100 ETH"],
"actions_failed": ["Buy KRK (balanceOf returned empty data after chain revert)", "Stake 500 KRK at 12% tax (stake form timeout: slider not visible within 15s, 504 Gateway Timeout)"],
"friction_points": [
"No whitepaper, technical appendix, or formal specification accessible from UI",
"No governance structure, DAO participation, or admin key disclosures visible",
"Stake form did not load (504 Gateway Timeout from ponder)",
"Insufficient liquidity depth for institutional positions (>$100k)"
],
"screenshot_refs": [
"test-results/usertest/priya/priya-landing-page-2026-03-27T08-41-02-959Z.png",
"test-results/usertest/priya/priya-wallet-connected-2026-03-27T08-41-07-293Z.png",
"test-results/usertest/priya/priya-stake-dashboard-2026-03-27T08-41-13-263Z.png",
"test-results/usertest/priya/priya-final-analysis-2026-03-27T08-41-59-057Z.png"
],
"notes": "Wallet connection succeeded. Navigate to /stake worked correctly (fix verified - no /stakestake). Stake form failed to load due to 504 Gateway Timeout. Buy failed due to contract state issue after chain revert. Test PASSED (graceful failure handling)."
},
{
"name": "marcus",
"display": "Marcus 'Flash' Chen",
"funnel": "staker",
"task": "staker funnel: land -> probe for exploits -> connect wallet -> test edge cases",
"completed": false,
"wallet_connected": true,
"actions_succeeded": ["Connect wallet", "Mint 50 ETH"],
"actions_failed": ["Buy KRK (ERC20: transfer amount exceeds deployer balance after chain revert)"],
"friction_points": [
"No 'Audited by X' badge prominently displayed",
"Contract addresses not easily visible for verification",
"Test crashed on buyKrk: deployer balance exhausted after multiple chain reverts"
],
"screenshot_refs": [
"test-results/usertest/marcus/marcus-landing-page-2026-03-27T08-42-26-011Z.png",
"test-results/usertest/marcus/marcus-wallet-connected-2026-03-27T08-42-30-090Z.png",
"test-results/usertest/marcus/marcus-cheats-page-2026-03-27T08-42-36-514Z.png"
],
"notes": "Wallet connection succeeded. Test FAILED due to deployer KRK balance exceeded after chain snapshot/revert cycles. This is a test infrastructure issue, not related to the stake navigation fix. Did not reach stake step."
}
],
"personas_completed": 3,
"personas_total": 5,
"wallet_connections_succeeded": 5,
"wallet_connections_total": 5,
"fix_verification": {
"pr": "#1171",
"issue": "#1168",
"fix_description": "attemptStake helper now uses new URL(page.url()).origin + '/stake' instead of appending 'stake' to the current URL path, which previously produced the invalid route /stakestake",
"previous_result": "0/5 personas completing staking — all blocked at /stakestake invalid route (2026-03-26 evidence)",
"current_result": "Navigation to /stake confirmed working for all 3 personas that reached the stake step (tyler, alex, priya). The /stakestake bug is eliminated.",
"fix_status": "verified_working",
"remaining_blocker": "Stake form elements do not render within timeout (15s). Root cause: 504 Gateway Timeout on ponder GraphQL API during stake page load. This is an infrastructure/ponder issue, not a navigation bug."
},
"new_issues_discovered": [
{
"description": "Stake page returns 504 Gateway Timeout from ponder API, preventing the stake form (slider, amount input) from rendering",
"root_cause": "Ponder indexer may be overloaded or timing out on GraphQL queries needed by the stake page. The 504 comes through caddy proxy.",
"impact": "All personas that reach /stake cannot complete staking (form elements never appear)",
"severity": "high"
},
{
"description": "Chain snapshot/revert cycle causes contract state corruption: balanceOf returns empty 0x data, and deployer KRK balance is not properly restored",
"root_cause": "evm_snapshot/evm_revert in anvil may not fully restore contract storage or the snapshot ID management in helpers.ts has edge cases with multiple sequential reverts",
"impact": "2/5 persona tests crash during buyKrk step; other personas see transfer failures",
"severity": "medium"
}
],
"critical_friction_points": [
"Stake form 504 timeout: ponder API times out, preventing stake page from loading (all personas)",
"No onboarding/tutorial content for DeFi newcomers (alex, sarah)",
"No prominent audit badge or trust signals (marcus, alex, sarah)",
"No whitepaper or formal mechanism specification accessible from UI (priya)",
"Tax rate concept confusing without guidance (tyler, alex, sarah)",
"Snatching concept frightening without explanation (tyler, alex, sarah)"
],
"verdict": "partial_pass",
"verdict_detail": "The /stakestake navigation bug (issue #1168, PR #1171) is VERIFIED FIXED. All 3 personas that reached the stake step (tyler, alex, priya) successfully navigated to /stake with no invalid route error. However, a new blocker emerged: the ponder GraphQL API returns 504 Gateway Timeout when the stake page loads, preventing the stake form from rendering. 0/5 personas completed an actual on-chain stake transaction. 2/5 tests failed outright due to chain state corruption from evm_snapshot/evm_revert cycles (infrastructure issue). Wallet connections: 5/5 succeeded.",
"comparison": {
"previous_date": "2026-03-26",
"previous_evidence": "2026-03-26-post-wallet-fix.json",
"previous_navigation_bug": "/stakestake (present in all 5 personas)",
"current_navigation_bug": "None — /stake navigation works correctly",
"previous_staking_completed": 0,
"current_staking_completed": 0,
"improvement": "Navigation fix verified working. Staking blocked by different issue (ponder 504 timeout, not navigation)."
},
"raw_reports": {
"tyler": "tmp/usertest-results/tyler-bags-morrison.json",
"alex": "tmp/usertest-results/alex-rivera.json",
"sarah": "tmp/usertest-results/sarah-park.json",
"priya": "tmp/usertest-results/dr-priya-malhotra.json",
"marcus": "tmp/usertest-results/marcus-flash-chen.json"
}
}

View file

@ -1,146 +0,0 @@
{
"date": "2026-03-28",
"test_type": "ponder-504-persistence-check",
"issue_ref": "#1186",
"prediction_ref": "#1185",
"context": "Targeted re-test to determine if the ponder 504 Gateway Timeout observed on 2026-03-27 is persistent or intermittent. Fresh stack startup with cold ponder indexer. All 5 personas run sequentially.",
"methodology": "Fresh stack started from clean state (no prior containers). Ponder health probed at multiple stages: pre-test (8 probes), mid-test (implicit via persona runs), post-test (3 probes). Playwright headless Chromium (1280x720) with 120s test timeout, 15s slider wait timeout.",
"stack_startup": {
"anvil_ready_s": 7,
"postgres_ready_s": 0,
"bootstrap_completed_s": 45,
"ponder_ready_s": 27,
"webapp_ready_s": 180,
"caddy_ready_s": 5,
"note": "Webapp initially timed out at 120s (npm install inside container), succeeded on second health check after ~180s total"
},
"ponder_health": {
"at_test_start": {
"healthy": true,
"probes": [
{"source": "direct_42069", "http_code": 200, "latency_ms": 131},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 43},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 19},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 18},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 17},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 22},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 15}
],
"all_200": true,
"max_latency_ms": 131,
"avg_latency_ms": 38
},
"at_test_end": {
"healthy": true,
"probes": [
{"source": "caddy_8081", "http_code": 200, "latency_ms": 33},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 16},
{"source": "caddy_8081", "http_code": 200, "latency_ms": 14}
],
"all_200": true,
"max_latency_ms": 33,
"avg_latency_ms": 21
},
"504_errors_observed": 0,
"conclusion": "Ponder GraphQL API is healthy and responsive throughout the entire test run. No 504 errors observed at any point. The 504 from 2026-03-27 is NOT reproducible on fresh stack start."
},
"stake_page": {
"html_loads": true,
"html_load_ms": 35,
"slider_renders": false,
"slider_timeout_s": 15,
"browser_error": "Failed to fetch protocol stats: SyntaxError: Unexpected token '<', \"<!--\\n * C\"... is not valid JSON",
"root_cause": "The webapp stake page fetches protocol stats from a URL that returns SPA HTML fallback instead of JSON. This is a webapp routing/fetch issue, NOT a ponder 504. Ponder itself responds correctly to direct GraphQL queries."
},
"personas": [
{
"name": "alex",
"display": "Alex Rivera",
"funnel": "passive-holder",
"wallet_connected": true,
"buy_krk": true,
"staking_attempted": true,
"staking_completed": false,
"stake_failure_reason": "Slider not visible within 15s (webapp fetch error, NOT ponder 504)",
"test_passed": true
},
{
"name": "marcus",
"display": "Marcus 'Flash' Chen",
"funnel": "staker",
"wallet_connected": true,
"buy_krk": true,
"staking_attempted": true,
"staking_completed": false,
"stake_failure_reason": "Slider not visible within 15s (webapp fetch error, NOT ponder 504)",
"test_passed": true
},
{
"name": "priya",
"display": "Dr. Priya Malhotra",
"funnel": "staker",
"wallet_connected": true,
"buy_krk": true,
"staking_attempted": true,
"staking_completed": false,
"stake_failure_reason": "Slider not visible within 15s (webapp fetch error, NOT ponder 504)",
"test_passed": true
},
{
"name": "sarah",
"display": "Sarah Park",
"funnel": "passive-holder",
"wallet_connected": true,
"buy_krk": true,
"staking_attempted": true,
"staking_completed": false,
"stake_failure_reason": "Slider not visible within 15s (webapp fetch error, NOT ponder 504)",
"test_passed": true
},
{
"name": "tyler",
"display": "Tyler 'Bags' Morrison",
"funnel": "passive-holder",
"wallet_connected": true,
"buy_krk": true,
"staking_attempted": true,
"staking_completed": false,
"stake_failure_reason": "Slider not visible within 15s (webapp fetch error, NOT ponder 504)",
"test_passed": true
}
],
"summary": {
"personas_total": 5,
"personas_completed": 5,
"wallet_connections_succeeded": 5,
"buy_krk_succeeded": 5,
"staking_completed": 0,
"staking_attempted": 5,
"test_duration_s": 288
},
"comparison_with_previous": {
"previous_date": "2026-03-27",
"previous_evidence": "2026-03-27-post-stake-fix.json",
"previous_504_errors": "multiple (all personas that reached /stake)",
"current_504_errors": 0,
"previous_staking_completed": 0,
"current_staking_completed": 0,
"previous_buy_krk_failures": 4,
"current_buy_krk_failures": 0,
"improvements": [
"No 504 Gateway Timeout errors (ponder healthy throughout)",
"All 5 personas successfully bought KRK (chain snapshot/revert issues from 2026-03-27 not reproduced)",
"All 5 test specs passed (previously 2 crashed)"
],
"remaining_blocker": "Stake form slider does not render. Root cause shifted from ponder 504 to webapp protocol-stats fetch returning HTML instead of JSON."
},
"verdict": "ponder_504_not_persistent",
"verdict_detail": "The ponder 504 Gateway Timeout from 2026-03-27 is NOT persistent. On fresh stack start, ponder responds in <50ms with 200 OK consistently. The staking form still does not render, but the root cause is now identified as a webapp fetch issue (protocol stats endpoint returns SPA HTML fallback), not a ponder backend error. 0/5 personas completed staking. The chain snapshot/revert issues from 2026-03-27 were also not reproduced.",
"raw_reports": {
"alex": "tmp/usertest-results/alex-rivera.json",
"marcus": "tmp/usertest-results/marcus-flash-chen.json",
"priya": "tmp/usertest-results/dr-priya-malhotra.json",
"sarah": "tmp/usertest-results/sarah-park.json",
"tyler": "tmp/usertest-results/tyler-bags-morrison.json"
}
}

View file

@ -1,139 +0,0 @@
<!-- last-reviewed: baa501fa46355f7b04bffdf386d397ad19f69298 -->
# Agent Brief: Formulas
Formulas are TOML files that declare automated pipeline jobs for the harb evaluator.
Each formula describes **what** to run, **when**, and **what it produces** — the
orchestrator reads the TOML and dispatches execution to the scripts referenced in
`[execution]`.
## Sense vs Act
Every formula has a `type` field. Getting this wrong breaks orchestrator scheduling
and evidence routing.
| Type | Meaning | Side-effects | Examples |
|------|---------|-------------|----------|
| `sense` | Read-only observation. Produces metrics / evidence only. | No PRs, no code changes, no contract deployments. | `run-holdout`, `run-protocol`, `run-resources`, `run-user-test` |
| `act` | Produces git artifacts: PRs, new files committed to main, contract upgrades. | Opens PRs, commits evidence + champion files, promotes attack vectors. | `run-evolution`, `run-red-team` |
**Rule of thumb:** if the formula's `deliver` step calls `git push` or opens a PR,
it is `act`. If it only commits an evidence JSON to main, it is `sense`.
## Current Formulas
| ID | Type | Script | Cron | Purpose |
|----|------|--------|------|---------|
| `run-evolution` | act | `tools/push3-evolution/evolve.sh` | — | Evolve Push3 optimizer candidates, admit champions to seed pool via PR |
| `run-holdout` | sense | `scripts/harb-evaluator/evaluate.sh` | — | Deploy PR branch, run blind holdout scenarios, report pass/fail |
| `run-protocol` | sense | `scripts/harb-evaluator/run-protocol.sh` | `0 7 * * *` | On-chain health snapshot (TVL, fees, positions, rebalances) |
| `run-red-team` | act | `scripts/harb-evaluator/red-team.sh` | — | Adversarial agent attacks the optimizer; promotes novel attack vectors via PR |
| `run-resources` | sense | `scripts/harb-evaluator/run-resources.sh` | `0 6 * * *` | Infrastructure snapshot (disk, RAM, API budget, CI queue) |
| `run-user-test` | sense | `scripts/run-usertest.sh` | — | Persona-based Playwright UX evaluation |
## Cron Conventions
- Schedules use standard 5-field cron syntax in `[cron] schedule`.
- Stagger by at least 1 hour to avoid resource contention (`run-resources` at 06:00, `run-protocol` at 07:00).
- Only `sense` formulas should be cron-scheduled. An `act` formula on a timer risks unattended PRs.
## Step ID Naming
Steps are declared as `[[steps]]` arrays. Each step must have an `id` field.
**Conventions:**
- Use lowercase kebab-case: `stack-up`, `run-scenarios`, `collect-tvl`.
- Prefix collection steps with `collect-` followed by the metric dimension: `collect-disk`, `collect-ram`, `collect-fees`.
- Every formula must include a `collect` step (assembles the evidence JSON) and a `deliver` step (commits + posts comment).
- Infrastructure lifecycle steps: `stack-up` / `stack-down` (or `boot-stack` / `teardown`).
- Use descriptive verbs: `run-attack-suite`, `evaluate-seeds`, `export-vectors`.
## TOML Structure
A formula file follows this skeleton:
```toml
# formulas/run-{name}.toml
#
# One-line description of what this formula does.
#
# Type: sense | act
# Cron: (schedule if applicable, or "—")
[formula]
id = "run-{name}"
name = "Human-Readable Name"
description = "What it does in one sentence."
type = "sense" # or "act"
# [cron] # optional — only for scheduled formulas
# schedule = "0 6 * * *"
[inputs.example_input]
type = "string" # string | integer | number
required = true
description = "What this input controls."
[execution]
script = "path/to/script.sh"
invocation = "ENV_VAR={example_input} bash path/to/script.sh"
[[steps]]
id = "do-something"
description = """
What this step does, in enough detail for a new contributor to understand.
"""
[[steps]]
id = "collect"
description = "Assemble metrics into evidence/{category}/{date}.json."
output = "evidence/{category}/{date}.json"
[[steps]]
id = "deliver"
description = "Commit evidence file and post summary comment to issue."
[products.evidence_file]
path = "evidence/{category}/{date}.json"
delivery = "commit to main"
schema = "evidence/README.md"
[resources]
profile = "light" # or "heavy"
concurrency = "safe to run in parallel" # or "exclusive"
```
## How to Add a New Formula
1. **Pick a name.** File goes in `formulas/run-{name}.toml`. The `[formula] id` must match: `run-{name}`.
2. **Decide sense vs act.** If your formula only reads state and writes evidence → `sense`. If it creates PRs, commits code, or modifies contracts → `act`.
3. **Write the TOML.** Follow the skeleton above. Key sections:
- `[formula]` — id, name, description, type.
- `[inputs.*]` — every tuneable parameter the script accepts.
- `[execution]` — script path and full invocation with `{input}` interpolation.
- `[[steps]]` — ordered list of logical steps. Always end with `collect` and `deliver`.
- `[products.*]` — what the formula produces (evidence file, PR, issue comment).
- `[resources]` — profile (`light` / `heavy`), concurrency constraints.
4. **Write or wire the backing script.** The `[execution] script` must exist and be executable. Most scripts live in `scripts/harb-evaluator/` or `tools/`. Exit codes: `0` = success, `1` = gate failed, `2` = infra error.
5. **Define the evidence schema.** If your formula writes `evidence/{category}/{date}.json`, add the schema to `evidence/README.md`.
6. **Update this file.** Add your formula to the "Current Formulas" table above.
7. **Test locally.** Run the backing script with the required inputs and verify the evidence file is well-formed JSON.
## Resource Profiles
| Profile | Meaning | Can run in parallel? |
|---------|---------|---------------------|
| `light` | Shell commands only (df, curl, cast). No Docker, no Anvil. | Yes — safe to run alongside anything. |
| `heavy` | Needs Anvil on port 8545, Docker containers, or long-running agents. | No — exclusive. Heavy formulas share port bindings and cannot overlap. |
## Evaluator Integration
Formula execution is dispatched by the orchestrator to scripts in
`scripts/harb-evaluator/`. See [scripts/harb-evaluator/AGENTS.md](../scripts/harb-evaluator/AGENTS.md)
for details on the evaluator runtime: stack lifecycle, scenario execution,
evidence collection, and the adversarial agent harness.

View file

@ -1,344 +0,0 @@
# formulas/run-evolution.toml
#
# Push3 optimizer evolution pipeline — evaluate seed pool, evolve a population
# of candidates, admit survivors back to the pool, deliver champions via PR.
#
# Type: act. Produces git artifacts (new .push3 champions + updated
# manifest.jsonl via PR to main; evidence file committed to main).
#
# Depends on: #973 (evidence/evolution/ directory structure)
[formula]
id = "run-evolution"
name = "Push3 Optimizer Evolution"
description = "Evaluate seed pool, evolve Push3 optimizer population, admit survivors, deliver champions via PR."
type = "act"
# "sense" → read-only, produces metrics only
# "act" → produces git artifacts (cf. run-red-team, run-evolution)
depends_on = [973]
# ── Inputs ─────────────────────────────────────────────────────────────────────
[inputs.seed]
type = "string"
required = false
default = "tools/push3-evolution/seeds/optimizer_v3.push3"
description = "Starting seed .push3 file (passed as --seed to evolve.sh). Serves as the fallback mutation source when the pool does not fill the full population."
[inputs.population]
type = "integer"
required = false
default = 10
description = "Number of candidates per generation (--population)."
[inputs.generations]
type = "integer"
required = false
default = 5
description = "Number of evolution generations to run (--generations)."
[inputs.mutation_rate]
type = "integer"
required = false
default = 2
description = "Mutations applied per candidate per generation (--mutation-rate)."
[inputs.elites]
type = "integer"
required = false
default = 2
description = "Top-scoring candidates carried forward unchanged each generation (--elites)."
[inputs.base_rpc_url]
type = "string"
required = true
description = """
Base network RPC endpoint forwarded as BASE_RPC_URL to both evaluate-seeds.sh
and evolve.sh. Required for the revm evaluator (default EVAL_MODE).
Example: https://mainnet.base.org or a fork URL from a running Anvil instance.
"""
[inputs.run_id]
type = "integer"
required = false
description = """
Override the run ID used when naming candidates admitted to the seed pool
(e.g. run009_gen2_c005.push3). Auto-incremented from the highest existing
run in manifest.jsonl when omitted (recommended).
"""
[inputs.attack_dir]
type = "string"
required = false
default = "onchain/script/backtesting/attacks"
description = """
Directory of .jsonl adversarial attack scenarios. Intended as an adversarial
fitness input candidates scored against these patterns in addition to the
revm fitness metric. Not yet forwarded to evolve.sh; documented here as a
forward spec.
"""
status = "planned"
# ── Execution ──────────────────────────────────────────────────────────────────
#
# Step 0 — evaluate-seeds.sh — runs before the main evolution loop.
# Scores any manifest.jsonl entries with fitness: null so the pool
# sampler has real fitness values when selecting gen_0 candidates.
#
# Steps 1-5 — evolve.sh — owns the full evolution lifecycle:
# 1. Initialise population: random sample from seed pool (--diverse-seeds).
# 2. Score candidates via revm batch evaluator (batch-eval.sh).
# 3. Tournament-select survivors; apply elitism + mutation / crossover.
# 4. Repeat for N generations; track global best.
# 5. Admit candidates above threshold (6e21 wei) into seeds/; rewrite manifest.
#
# evolve.sh always passes --diverse-seeds so gen_0 inherits pool diversity.
# --run-id is omitted to let evolve.sh auto-increment from manifest.jsonl.
[execution]
pre_script = "tools/push3-evolution/evaluate-seeds.sh"
pre_invocation = "BASE_RPC_URL={base_rpc_url} bash tools/push3-evolution/evaluate-seeds.sh"
script = "tools/push3-evolution/evolve.sh"
invocation = "BASE_RPC_URL={base_rpc_url} bash tools/push3-evolution/evolve.sh --seed {seed} --population {population} --generations {generations} --mutation-rate {mutation_rate} --elites {elites} --output tmp/evolution --diverse-seeds"
# Exit codes propagated by evolve.sh:
# 0 evolution complete; best candidate found and pool admission attempted
# 2 infrastructure error (RPC unreachable, missing tool, revm eval failed)
# ── Steps ──────────────────────────────────────────────────────────────────────
[[steps]]
id = "evaluate-seeds"
description = """
Score manifest entries with fitness: null before the evolution loop begins.
tools/push3-evolution/evaluate-seeds.sh:
- Reads tools/push3-evolution/seeds/manifest.jsonl.
- For every entry where fitness is null, runs fitness.sh against the
corresponding .push3 file and records the numeric score.
- Rewrites manifest.jsonl atomically (temp-file rename).
- Exits 0 when nothing to do (idempotent; safe to re-run).
- Exits 2 on infrastructure error (eval stack unreachable).
Primary targets: LLM-generated seeds (origin=llm) and evolved entries whose
fitness was nulled due to scoring inflation (fitness_flags: token_value_inflation,
processExecIf_fix). Real fitness values allow --diverse-seeds to weight the
gen_0 sample correctly.
"""
script = "tools/push3-evolution/evaluate-seeds.sh"
[[steps]]
id = "evolve"
description = """
Run the outer evolutionary loop via tools/push3-evolution/evolve.sh.
Initialisation (gen_0):
A random sample of up to {population} candidates is drawn from the seed pool
(tools/push3-evolution/seeds/); any shortfall is filled by mutating {seed}.
Seeds with unevaluated fitness (null) are included in the sample with equal
probability evaluate-seeds (step 0) should have resolved most of these.
Per-generation loop ({generations} iterations):
a. Score all candidates in a single forge test invocation via
tools/push3-evolution/revm-evaluator/batch-eval.sh (EVAL_MODE=revm).
Falls back to per-candidate fitness.sh (EVAL_MODE=anvil) if revm is
unavailable.
b. Log generation stats: min / max / mean fitness, best candidate file.
c. Tournament-select survivors (k = population / 2).
d. Elitism: carry the top {elites} candidates forward unchanged.
e. Fill remaining slots: mutate random survivors (first half) and apply
pairwise crossover (second half); fall back to copy on failure.
Output per run (tmp/evolution/run_NNN/):
generation_0.jsonl generation_N.jsonl per-candidate fitness records
best.push3 global champion
diff.txt constant delta vs seed
evolution.log full run transcript
Pool admission (after final generation):
Candidates scoring above 6e21 wei are deduplicated by content hash and
admitted to tools/push3-evolution/seeds/, named run{NNN}_gen{G}_c{C}.push3.
manifest.jsonl is rewritten atomically; the evolved pool is capped at 100
entries by fitness rank (hand-written / LLM seeds are always pinned).
"""
script = "tools/push3-evolution/evolve.sh"
output_dir = "tmp/evolution"
[[steps]]
id = "score-attacks"
description = """
[Planned] Score the champion against known adversarial attack scenarios in
{attack_dir}/*.jsonl via onchain/script/backtesting/AttackRunner.s.sol.
For each attack file:
- Replay the op sequence against a fresh Anvil snapshot.
- Record LM total ETH before and after.
- Emit one fitness adjustment: penalise the candidate's score if the
attack succeeds (floor broken), reward if the floor holds.
Results feed back into the adversarial fitness component candidates that
survive all known attacks rank higher in the evidence record.
Skipped when {attack_dir} is empty or AttackRunner is unavailable.
"""
status = "planned"
attack_source = "{attack_dir}/*.jsonl"
forge_script = "onchain/script/backtesting/AttackRunner.s.sol"
[[steps]]
id = "collect"
description = """
Aggregate evolve.sh outputs into evidence/evolution/{date}.json.
Reads:
- tmp/evolution/run_NNN/generation_N.jsonl per-generation fitness records
- tmp/evolution/run_NNN/best.push3 champion file
- tools/push3-evolution/seeds/manifest.jsonl admission results
Writes evidence/evolution/{date}.json conforming to the schema in
evidence/README.md ## Schema: evolution/YYYY-MM-DD.json.
Verdict: "improved" if best_fitness > best seed fitness in manifest before
the run; "no_improvement" otherwise.
"""
output = "evidence/evolution/{date}.json"
schema = "evidence/README.md"
[[steps]]
id = "cleanup"
description = """
Remove intermediate per-generation candidate files that are not part of the
final results. Only the following files are retained after this step:
tmp/evolution/run_NNN/best.push3 global champion
tmp/evolution/run_NNN/diff.txt constant delta vs seed
tmp/evolution/run_NNN/evolution.log full run transcript
tools/push3-evolution/seeds/run{NNN}_*.push3
top-N newly admitted seeds
( elites per generation)
Files removed:
tmp/evolution/run_NNN/generation_*.jsonl per-candidate fitness records
(already aggregated into evidence)
tmp/evolution/run_NNN/candidate_*.push3 intermediate per-generation
candidates that are not elites
Rationale: the evolution box reached 91% disk utilisation in run #1025 because
these intermediate files were never cleaned up. Aggregated fitness data is
preserved in evidence/evolution/{date}.json; the per-candidate .push3 files for
non-elite generations are not needed once the evidence file is written.
"""
[[steps]]
id = "deliver"
description = """
Commit results to a branch, push, open PR, then post summary comment.
ORDERING IS MANDATORY each sub-step must complete before the next begins.
Do NOT post to the issue before the PR URL is available.
1. CLEAN GIT STATE
Run `git checkout -- .` to discard any working-tree modifications that are
NOT part of the evolution results (e.g. .sol files left over from a prior
session, scratch files). Only stage files that belong to this run:
- evidence/evolution/{date}.json
- tools/push3-evolution/seeds/evo_run{NNN}_champion.push3
- tools/push3-evolution/seeds/manifest.jsonl
Verify `git diff --check` passes before committing.
2. COMMIT TO BRANCH
Create branch evidence/evolution-run-{run_id} from master.
Commit the staged result files with message:
"evo: run{NNN} results — fitness={best_fitness}"
The commit MUST include all three files above.
3. PUSH AND CREATE PR
Push the branch to origin.
Open a Codeberg PR targeting master:
Title: "evo: run{NNN} champion — fitness={best_fitness}"
Body: generation-by-generation table (gen, best, mean, worst fitness),
top-3 admitted candidates with fitness scores, constant diff vs
seed (from diff.txt), link to evidence file.
If `git push` or PR creation fails:
a. Post an error comment to the originating issue with the failure reason
and the path of the local evidence file.
b. Leave the issue OPEN.
c. Exit with a non-zero status do NOT proceed to step 4.
4. POST SUMMARY COMMENT (only after PR URL is confirmed)
Post a comment to the originating issue containing:
- Verdict (improved / no_improvement).
- Best fitness achieved and which generation it was found in.
- Admission count: N candidates added to seed pool.
- Link to the champion PR (required do not post without it).
- Link to evidence file committed in the PR.
- If no_improvement: best fitness achieved and seed pool size.
Do NOT close the issue in this step; closing is the orchestrator's
responsibility once the PR is merged.
"""
# ── Products ───────────────────────────────────────────────────────────────────
[products.evidence_file]
path = "evidence/evolution/{date}.json"
delivery = "PR to main (same PR as champion_files, on branch evidence/evolution-run-{run_id})"
schema = "evidence/README.md" # see ## Schema: evolution/YYYY-MM-DD.json
[products.champion_files]
path = "tools/push3-evolution/seeds/evo_run{NNN}_champion.push3"
# {NNN} is the auto-incremented run ID assigned by evolve.sh at runtime.
delivery = "PR to main"
note = "Only created when at least one candidate exceeds the admission threshold (6e21 wei)."
[products.manifest]
path = "tools/push3-evolution/seeds/manifest.jsonl"
delivery = "PR to main (same PR as champion_files)"
note = "Updated with newly admitted entries and fitness scores from evaluate-seeds."
[products.issue_comment]
delivery = "post to originating issue AFTER PR is created and URL is confirmed"
content = "verdict (improved/no_improvement), best fitness, generation found, admission count, link to champion PR (mandatory), link to evidence file"
on_pr_failure = "post error comment with failure reason and local evidence path; leave issue OPEN; do not close"
on_run_failure = "include best fitness achieved, last generation completed, full log available in tmp/evolution/run_NNN/evolution.log; do not close issue"
ordering_note = "The comment MUST NOT be posted before the PR URL exists. Closing the issue is the orchestrator's responsibility after PR merge, not this formula's."
# ── Resources ──────────────────────────────────────────────────────────────────
[resources]
profile = "heavy"
compute = "CPU + RAM intensive — transpile + compile + deploy + revm eval per candidate"
rpc = "Base network RPC (BASE_RPC_URL) for revm fork; or Anvil (EVAL_MODE=anvil)"
concurrency = "exclusive — revm evaluator and optional Anvil share port 8545 with run-holdout and run-red-team"
# ── Notes ──────────────────────────────────────────────────────────────────────
[notes]
no_uups_deployment = """
The evolution pipeline produces Push3 candidate files only no UUPS proxy
deployment step is wired. Candidates are scored in simulation (revm or Anvil)
and admitted to the seed pool for future runs. Deployment to a live chain is
out of scope until the champion passes holdout and red-team gates.
"""
eval_mode = """
Default EVAL_MODE is revm (batch-eval.sh): all candidates in a generation are
scored in a single forge test invocation against a Base fork, 10-100× faster
than per-candidate Anvil. Set EVAL_MODE=anvil to fall back to fitness.sh
(slower, but does not require BASE_RPC_URL if Anvil is already running).
Gas limit: revm evaluator runs at ~25 candidates × 100 trades per batch.
For larger populations, increase the batch budget in batch-eval.sh.
"""
adversarial_fitness = """
Adversarial fitness against attack scenarios ({attack_dir}/*.jsonl) is planned
but not yet implemented (score-attacks step is status=planned). Currently the
only fitness signal is the revm/Anvil metric from batch-eval.sh / fitness.sh.
When implemented, attack survival will penalise candidates whose floor breaks
under known attack patterns, biasing the population toward safer programs.
"""
fee_fitness = """
Fee optimization against in-market pool data is planned as a second fitness
dimension. Not yet implemented; tracked as a follow-up issue.
"""
pool_cap = """
The evolved seed pool is capped at 100 entries by fitness rank. Hand-written
(origin=hand-written) and LLM-generated (origin=llm) seeds are always pinned
regardless of fitness. Evolved entries below the pool floor are evicted when
new higher-scoring candidates are admitted. Raw fitness values are only
comparable within the same evaluation run; entries with fitness_flags
(token_value_inflation, processExecIf_fix) are ranked as fitness=0 for
admission and eviction purposes.
"""

View file

@ -1,138 +0,0 @@
# formulas/run-holdout.toml
#
# Holdout quality gate — deploy a PR branch, run blind holdout scenarios,
# report pass/fail.
#
# Type: sense-only. Produces metrics and a gate decision.
# Does NOT commit code, open PRs, or modify contracts.
#
# Depends on: #973 (evidence/holdout/ directory structure)
[formula]
id = "run-holdout"
name = "Holdout Quality Gate"
description = "Deploy PR branch, run blind holdout scenarios, report pass/fail."
type = "sense"
# "sense" → read-only, produces metrics only
# "act" → produces git artifacts (cf. run-evolution, run-red-team)
# ── Inputs ─────────────────────────────────────────────────────────────────────
[inputs.pr_number]
type = "integer"
required = true
description = "PR number to evaluate"
[inputs.holdout_repo]
type = "string"
required = false
default = "ssh://git@codeberg.org/johba/harb-holdout-scenarios.git"
description = """
Holdout scenarios repo. Dev-agent has no read access cloned at runtime
by evaluate.sh into the ephemeral worktree, never checked in to harb.
"""
# ── Execution ──────────────────────────────────────────────────────────────────
#
# The orchestrator invokes evaluate.sh, which owns the full lifecycle:
# checkout → build → boot stack → clone holdout repo → playwright → teardown.
[execution]
script = "scripts/harb-evaluator/evaluate.sh"
invocation = "bash scripts/harb-evaluator/evaluate.sh {pr_number}"
# Exit codes propagated by evaluate.sh:
# 0 gate passed (≥90% of scenarios achieved 2/3 majority)
# 1 gate failed (at least one scenario failed the 2/3 threshold)
# 2 infra error (stack failed to start, missing dependency, etc.)
# ── Steps ──────────────────────────────────────────────────────────────────────
[[steps]]
id = "boot-stack"
description = """
Spin up full docker stack from PR branch.
evaluate.sh creates an isolated git worktree, builds kraiken-lib,
installs npm deps, installs Playwright browser binaries, then runs:
docker compose -p harb-eval-{pr_number} up -d
Waits for anvil (healthy), bootstrap (exited 0), ponder (healthy + /ready).
"""
[[steps]]
id = "clone-holdout"
description = """
Clone harb-holdout-scenarios into .holdout-scenarios/ inside the worktree.
Sets HOLDOUT_SCENARIOS_DIR for holdout.config.ts.
The dev-agent never sees this repo; the wall is enforced by separate
repository access control on Codeberg.
"""
[[steps]]
id = "run-scenarios"
description = """
Run 8 Playwright specs via holdout.config.ts (workers=1, headless chromium).
4 surfaces: contracts, graphql, landing, webapp.
Each scenario is executed up to 3 times; 2/3 runs must pass.
"""
surfaces = ["contracts", "graphql", "landing", "webapp"]
scenarios_per_surface = 2
scenarios_total = 8
runs_per_scenario = 3
pass_per_scenario = 2 # 2-of-3 majority required for a scenario to count as passed
[[steps]]
id = "teardown"
description = """
docker compose -p harb-eval-{pr_number} down -v --remove-orphans
git worktree remove --force {worktree_dir}
Always runs cleanup is registered as a shell trap in evaluate.sh.
"""
[[steps]]
id = "deliver"
description = """
Collect per-scenario results from test-results/holdout-reports/.
Write evidence/holdout/{date}-pr{pr_number}.json and commit to main.
Post gate verdict to issue #{pr_number}.
On failure: include one-line reason per failed scenario.
Scenario text is never exposed to the dev-agent.
"""
# ── Gate ───────────────────────────────────────────────────────────────────────
[gate]
pass_threshold_pct = 90 # ≥90% of scenarios must pass
scenarios_total = 8 # 8 * 0.9 = 7.2 → at least 8 must pass to clear 90%
per_scenario_runs = 3
per_scenario_pass = 2 # 2-of-3 majority per scenario
# ── Products ───────────────────────────────────────────────────────────────────
[products.evidence_file]
path = "evidence/holdout/{date}-pr{pr_number}.json"
delivery = "commit to main"
schema = "evidence/README.md" # see §Schema: holdout/YYYY-MM-DD-prNNN.json
[products.issue_comment]
delivery = "post to issue #{pr_number}"
content = "gate verdict (pass/fail), scenarios_passed/scenarios_total, link to evidence file"
on_failure = "one-line failure reason per failing scenario; scenario text never revealed"
# ── Resources ──────────────────────────────────────────────────────────────────
[resources]
profile = "heavy"
containers = "5+" # anvil, bootstrap, ponder, webapp, (caddy if needed)
browser = "chromium (Playwright)"
ports = ["8545", "42069", "5173", "8081", "5100"]
concurrency = "exclusive — port bindings prevent parallel runs on the same host"
# ── Notes ──────────────────────────────────────────────────────────────────────
[notes]
wall = """
The holdout-specs repo (harb-holdout-scenarios) is intentionally inaccessible
to the dev-agent. The agent receives only pass/fail and one-line failure reasons
never the scenario text. This is enforced by Codeberg repo permissions, not
by runtime filtering.
"""

View file

@ -1,187 +0,0 @@
# formulas/run-protocol.toml
#
# On-chain protocol health snapshot — collect TVL, accumulated fees,
# position count, and rebalance frequency from the deployed LiquidityManager.
# Write a structured JSON evidence file for planner and predictor consumption.
#
# Type: sense. Read-only — produces metrics only, no git artifacts.
#
# Staleness threshold: 1 day (matches evidence/protocol/ schema).
# Cron: daily at 07:00 UTC (staggered 1 h after run-resources).
[formula]
id = "run-protocol"
name = "On-Chain Protocol Health Snapshot"
description = "Collect TVL, accumulated fees, position count, and rebalance frequency from the deployed LiquidityManager; write evidence/protocol/{date}.json."
type = "sense"
# "sense" → read-only, produces metrics only
# "act" → produces git artifacts (cf. run-evolution, run-red-team)
# ── Cron ───────────────────────────────────────────────────────────────────────
[cron]
schedule = "0 7 * * *" # daily at 07:00 UTC (1 h after run-resources)
description = "Matches 1-day staleness threshold — one snapshot per day keeps the record fresh."
# ── Inputs ─────────────────────────────────────────────────────────────────────
[inputs.rpc_url]
type = "string"
required = true
description = """
Base network RPC endpoint used to query on-chain state.
Example: https://mainnet.base.org or a running Anvil fork URL.
"""
[inputs.deployments_file]
type = "string"
required = false
default = "onchain/deployments-local.json"
description = """
Path to the deployments JSON file containing contract addresses.
The formula reads LiquidityManager address from this file.
Use onchain/deployments.json for mainnet; onchain/deployments-local.json
for a local Anvil fork.
"""
[inputs.lookback_blocks]
type = "integer"
required = false
default = 7200
description = """
Number of blocks to scan for Recenter events when computing
rebalance_count_24h (~24 h of Base blocks at ~2 s/block).
"""
# ── Execution ──────────────────────────────────────────────────────────────────
[execution]
script = "scripts/harb-evaluator/run-protocol.sh"
invocation = "RPC_URL={rpc_url} DEPLOYMENTS_FILE={deployments_file} LOOKBACK_BLOCKS={lookback_blocks} bash scripts/harb-evaluator/run-protocol.sh"
# Exit codes:
# 0 snapshot written successfully
# 2 infrastructure error (RPC unreachable, missing deployments file, forge unavailable, etc.)
# ── Steps ──────────────────────────────────────────────────────────────────────
[[steps]]
id = "read-addresses"
description = """
Read the LiquidityManager contract address from {deployments_file}.
Fail with exit code 2 if the file is absent or the address is missing.
"""
[[steps]]
id = "collect-tvl"
description = """
Query LiquidityManager total ETH via forge script LmTotalEth.s.sol
against {rpc_url}.
Records tvl_eth (wei string) and tvl_eth_formatted (ETH, 2 dp).
LmTotalEth.s.sol uses exact Uniswap V3 integer math (LiquidityAmounts +
TickMath) to sum free ETH, free WETH, and ETH locked across all three
positions (floor, anchor, discovery).
"""
forge_script = "onchain/script/LmTotalEth.s.sol"
[[steps]]
id = "collect-fees"
description = """
Query accumulated protocol fees from the LiquidityManager via cast call:
cast call $LM "accumulatedFees()(uint256)"
Records accumulated_fees_eth (wei string) and accumulated_fees_eth_formatted
(ETH, 3 dp).
Falls back to 0 gracefully if the function reverts or is not present on
the deployed contract (older deployment without fee tracking).
"""
[[steps]]
id = "collect-positions"
description = """
Query the three Uniswap V3 positions held by the LiquidityManager:
LiquidityManager.positions(0) (liquidity, tickLower, tickUpper) # FLOOR
LiquidityManager.positions(1) (liquidity, tickLower, tickUpper) # ANCHOR
LiquidityManager.positions(2) (liquidity, tickLower, tickUpper) # DISCOVERY
Records position_count (number of positions with liquidity > 0) and the
positions array.
"""
[[steps]]
id = "collect-rebalances"
description = """
Count Recenter events emitted by the LiquidityManager in the past
{lookback_blocks} blocks via eth_getLogs.
Records:
- rebalance_count_24h: total Recenter event count in the window.
- last_rebalance_block: block number of the most recent Recenter event
(0 if none found in the window).
"""
event_signature = "Recentered(int24,bool)"
[[steps]]
id = "collect"
description = """
Assemble all collected metrics into evidence/protocol/{date}.json.
Compute verdict:
- "offline" if tvl_eth = 0 or RPC was unreachable.
- "degraded" if position_count < 3, or rebalance_count_24h = 0 and the
protocol has been live for > 1 day.
- "healthy" otherwise.
Write the file conforming to the schema in evidence/README.md
## Schema: protocol/YYYY-MM-DD.json.
"""
output = "evidence/protocol/{date}.json"
schema = "evidence/README.md" # see ## Schema: protocol/YYYY-MM-DD.json
[[steps]]
id = "deliver"
description = """
Commit evidence/protocol/{date}.json to main.
Post a one-line summary comment to the originating issue (if any):
verdict, tvl_eth_formatted, accumulated_fees_eth_formatted,
position_count, rebalance_count_24h.
On "degraded" or "offline": highlight the failing dimension and its value.
"""
# ── Products ───────────────────────────────────────────────────────────────────
[products.evidence_file]
path = "evidence/protocol/{date}.json"
delivery = "commit to main"
schema = "evidence/README.md" # see ## Schema: protocol/YYYY-MM-DD.json
[products.issue_comment]
delivery = "post to originating issue (if any)"
content = "verdict, tvl_eth_formatted, accumulated_fees_eth_formatted, position_count, rebalance_count_24h"
on_degraded = "highlight failing dimension and its current value"
# ── Resources ──────────────────────────────────────────────────────────────────
[resources]
profile = "light"
compute = "local — forge script + cast calls only; no Anvil or Docker startup required"
rpc = "Base network RPC ({rpc_url}) — read-only calls"
concurrency = "safe to run in parallel with other formulas"
# ── Notes ──────────────────────────────────────────────────────────────────────
[notes]
tvl_metric = """
TVL is measured as LiquidityManager total ETH: free ETH + free WETH + ETH
locked across all three Uniswap V3 positions (floor, anchor, discovery).
Uses the same LmTotalEth.s.sol forge script as run-red-team to ensure
consistent measurement methodology.
"""
rebalance_staleness = """
A zero rebalance_count_24h on an established deployment indicates the
recenter() upkeep bot (services/txnBot) has stalled. The "degraded"
verdict triggers a planner alert. On a fresh deployment (< 1 day old)
zero rebalances is expected and does not trigger degraded.
"""
fees_fallback = """
accumulated_fees_eth falls back to 0 for deployments without fee tracking.
The verdict is not affected by a zero fee value alone only TVL and
position_count drive the verdict.
"""

View file

@ -1,257 +0,0 @@
# formulas/run-red-team.toml
#
# Adversarial red-team — spin up isolated stack, run adversarial agent against
# the active optimizer, commit evidence, export newly discovered attack vectors.
#
# Type: act. Produces evidence (floor held / broken) AND git artifacts
# (new attack vectors via PR to onchain/script/backtesting/attacks/).
#
# Depends on: #973 (evidence/red-team/ directory structure)
# #974 (promote-attacks.sh for attack vector export)
[formula]
id = "run-red-team"
name = "Adversarial Red-Team"
description = "Spin up isolated stack, run adversarial agent against the active optimizer, commit evidence, export new attack vectors."
type = "act"
# "sense" → read-only, produces metrics only
# "act" → produces git artifacts (cf. run-evolution, run-red-team)
depends_on = [973, 974]
# ── Inputs ─────────────────────────────────────────────────────────────────────
[inputs.candidate_name]
type = "string"
required = false
default = "unknown"
description = "Human-readable label used in evidence records and attack filenames (passed as CANDIDATE_NAME)."
[inputs.optimizer_profile]
type = "string"
required = false
default = "push3-default"
description = "Named optimizer profile / variant (e.g. push3-default, evo_run004_champion) passed as OPTIMIZER_PROFILE."
[inputs.attack_dir]
type = "string"
required = false
default = "onchain/script/backtesting/attacks"
description = """
Directory containing existing .jsonl attack patterns for the structured
attack suite. Forwarded to red-team.sh as ATTACK_DIR.
"""
[inputs.claude_timeout]
type = "integer"
required = false
default = 7200
description = "Timeout in seconds for the adversarial agent run (maps to CLAUDE_TIMEOUT env var)."
# ── Execution ──────────────────────────────────────────────────────────────────
#
# red-team.sh owns the full lifecycle:
# bootstrap-light → fund LM → snapshot → adversarial agent → collect
# → promote-attacks (if floor broken) → deliver → teardown.
#
# CANDIDATE_NAME and OPTIMIZER_PROFILE label the evidence record and attack
# filenames. To deploy a specific Push3 candidate, set the CANDIDATE env var
# (path to a .push3 file) — bootstrap-light.sh will transpile, recompile, and
# upgrade the Optimizer proxy to OptimizerV3 (see notes.candidate_injection).
[execution]
script = "scripts/harb-evaluator/red-team.sh"
invocation = "CANDIDATE_NAME={candidate_name} OPTIMIZER_PROFILE={optimizer_profile} CLAUDE_TIMEOUT={claude_timeout} ATTACK_DIR={attack_dir} bash scripts/harb-evaluator/red-team.sh"
# Exit codes propagated by red-team.sh:
# 0 floor held (LM total ETH did not decrease)
# 1 floor broken (adversary extracted ETH from LiquidityManager)
# 2 infra error (Anvil unreachable, bootstrap failed, missing dependency, etc.)
# ── Steps ──────────────────────────────────────────────────────────────────────
[[steps]]
id = "stack-up"
description = """
Bootstrap an isolated Anvil fork with contracts deployed.
scripts/harb-evaluator/bootstrap-light.sh:
- Starts a fresh Anvil instance (or reuses one if already running).
- Deploys KRK, LM, Stake, and OptimizerProxy via DeployLocal.sol.
- Funds LM with 1 000 ETH (as WETH) and calls recenter() to deploy
liquidity into positions establishing a realistic baseline.
- Verifies Anvil responds and all contract addresses are present in
onchain/deployments-local.json before proceeding.
When the CANDIDATE env var is set (path to a .push3 file), bootstrap-light.sh
transpiles the candidate and upgrades the Optimizer proxy to OptimizerV3.
See notes.candidate_injection for details.
"""
[[steps]]
id = "run-attack-suite"
description = """
Run every existing .jsonl attack file in {attack_dir} through
onchain/script/backtesting/AttackRunner.s.sol.
For each file:
- Record LM total ETH before and after via forge script LmTotalEth.s.sol.
- Revert to the baseline Anvil snapshot between files so attacks are
independent.
- Emit one result entry: strategy name, abstract op pattern,
floor held / broken, delta in basis points.
This phase exhausts the known attack catalogue before the adversarial
agent is given a turn, seeding its memory with which strategies are
already understood.
"""
attack_source = "{attack_dir}/*.jsonl"
forge_script = "onchain/script/backtesting/AttackRunner.s.sol"
snapshot_mode = "revert-between-attacks"
[[steps]]
id = "run-adversarial-agent"
description = """
Spawn the Claude adversarial agent (red-team-program.md prompt) with full
write access to cast / forge / python3 / jq.
Goal: make ethPerToken() decrease i.e. extract ETH from LiquidityManager.
The agent:
1. Iterates freely: snapshot craft novel attack execute measure
revert repeat.
2. Appends each attempted strategy to tmp/red-team-report.txt and
tmp/red-team-stream.jsonl.
3. On any confirmed ETH decrease: exports the winning op sequence to
tmp/red-team-attacks.jsonl and continues searching.
Runs until CLAUDE_TIMEOUT expires or the agent signals completion.
"""
timeout_env = "CLAUDE_TIMEOUT"
memory_file = "tmp/red-team-memory.jsonl" # cross-run pattern learning
report_file = "tmp/red-team-report.txt"
stream_file = "tmp/red-team-stream.jsonl"
[[steps]]
id = "collect"
description = """
After the agent run, red-team.sh:
1. Reads LM total ETH after (forge script LmTotalEth.s.sol).
2. Extracts strategy findings from tmp/red-team-stream.jsonl and appends
them to tmp/red-team-memory.jsonl for cross-run learning.
3. Exports the agent's cast send commands from the stream log to
tmp/red-team-attacks.jsonl via export-attacks.py.
4. Replays the exported sequence through AttackRunner.s.sol, writing full
state snapshots to tmp/red-team-snapshots.jsonl (used for optimizer
training; non-fatal if replay produces no output).
5. Computes floor_held / floor_broken and writes evidence/red-team/{date}.json
conforming to the schema in evidence/README.md ## Schema: red-team/.
"""
output = "evidence/red-team/{date}.json"
schema = "evidence/README.md" # see ## Schema: red-team/YYYY-MM-DD.json
side_output_file = "tmp/red-team-snapshots.jsonl" # AttackRunner state snapshots for optimizer training
[[steps]]
id = "export-vectors"
description = """
Only runs when the floor is broken (BROKE=true in red-team.sh).
If tmp/red-team-attacks.jsonl is non-empty, call promote-attacks.sh to open
a Codeberg PR with the newly discovered attack vectors.
promote-attacks.sh:
- Deduplicates by op-type fingerprint against existing files in
onchain/script/backtesting/attacks/.
- Auto-classifies the attack type (staking, il-crystallization,
floor-ratchet, fee-drain, lp-manipulation, floor-attack, ).
- Creates a git branch, commits the new .jsonl, and opens a Codeberg PR
targeting main, including the ETH extraction amount in the PR title and body.
- Exits 0 when no novel patterns remain after deduplication (non-fatal).
Skipped gracefully if CODEBERG_TOKEN and ~/.netrc are both absent.
Not called when the floor holds novel-but-non-exploiting patterns are
not promoted.
"""
script = "scripts/harb-evaluator/promote-attacks.sh"
args = "--attacks tmp/red-team-attacks.jsonl --candidate {candidate_name} --profile {optimizer_profile} --eth-extracted <delta_wei> --eth-before <lm_eth_before_wei>"
# --eth-extracted and --eth-before are computed at runtime by red-team.sh (lm_eth_before lm_eth_after)
# and passed directly to promote-attacks.sh — they are not formula inputs.
[[steps]]
id = "stack-down"
description = """
Tear down the Anvil instance started in stack-up.
red-team.sh registers cleanup() as a shell trap (EXIT / INT / TERM):
- Reverts to the baseline Anvil snapshot.
- Kills the Claude sub-process if still running.
Always runs even on infra error so port 8545 is not left occupied.
"""
[[steps]]
id = "deliver"
description = """
Commit evidence/red-team/{date}.json to main and post a summary comment
to the originating issue.
Comment includes:
- Verdict (floor_held / floor_broken).
- ETH extracted (formatted in ETH) and delta in basis points.
- Total attacks tried (agent-discovered count + structured suite count).
- Link to committed evidence file.
- If novel vectors were promoted: link to the attack-vector PR.
On floor_broken: also include the highest-yield attack strategy name and
its abstract op pattern.
"""
# ── Products ───────────────────────────────────────────────────────────────────
[products.evidence_file]
path = "evidence/red-team/{date}.json"
delivery = "commit to main"
schema = "evidence/README.md" # see ## Schema: red-team/YYYY-MM-DD.json
[products.attack_vectors]
path = "onchain/script/backtesting/attacks/{attack_type}-{candidate_name}.jsonl"
# {attack_type} is not a formula input — it is computed at runtime by
# promote-attacks.sh's classifier (staking, il-crystallization, floor-ratchet, …).
delivery = "PR to main"
script = "scripts/harb-evaluator/promote-attacks.sh"
note = "Only created when the floor is broken AND novel (deduplicated) attack vectors are discovered."
[products.issue_comment]
delivery = "post to originating issue"
content = "verdict (floor_held/floor_broken), ETH extracted, attacks tried, link to evidence file; if vectors found: link to attack-vector PR"
on_failure = "include highest-yield attack name and op pattern; full agent transcript available in tmp/red-team-stream.jsonl"
# ── Resources ──────────────────────────────────────────────────────────────────
[resources]
profile = "heavy"
compute = "local — Anvil fork + revm, no Docker required"
rpc = "Anvil (bootstrap-light, default port 8545)"
agent = "Claude (claude CLI, CLAUDE_TIMEOUT seconds)"
concurrency = "exclusive — shares Anvil port 8545 with run-holdout and other heavy formulas"
# ── Notes ──────────────────────────────────────────────────────────────────────
[notes]
floor_metric = """
The primary safety metric is LM total ETH: free ETH + free WETH + ETH locked
across all three Uniswap V3 positions (floor, anchor, discovery).
Measured via forge script LmTotalEth.s.sol using exact Uniswap V3 integer
math (LiquidityAmounts + TickMath). A decrease in total ETH = floor broken.
"""
attack_dedup = """
promote-attacks.sh fingerprints each candidate attack by its abstract op
sequence (e.g. wrap buy stake recenter_multi sell) and compares
against all existing files in onchain/script/backtesting/attacks/.
Only genuinely novel sequences are included in the PR duplicate
rediscoveries are silently dropped and the step exits 0.
"""
candidate_injection = """
Push3 candidate injection is supported via the CANDIDATE env var in
bootstrap-light.sh. When CANDIDATE points to a .push3 file the script:
1. Invokes push3-transpiler to regenerate OptimizerV3Push3.sol.
2. Extracts the function body into OptimizerV3Push3Lib.sol (shared library).
3. Deploys contracts normally via DeployLocal.sol (Optimizer v1 behind UUPS proxy).
4. Deploys a fresh OptimizerV3 implementation and upgrades the proxy via upgradeTo().
The candidate_name and optimizer_profile inputs remain metadata-only (evidence
records, attack filenames, PR titles).
"""
run_attack_suite_gap = """
The run-attack-suite step is implemented in red-team.sh (step 5a). It loops
through every *.jsonl file in the attack directory, replays each through
AttackRunner.s.sol, records LM total ETH before/after with snapshot revert
between files, and injects results into the agent prompt.
"""

View file

@ -1,155 +0,0 @@
# formulas/run-resources.toml
#
# Infrastructure resource snapshot — collect disk usage, RAM trends,
# Anthropic API call counts and budget burn, and Woodpecker CI queue depth.
# Write a structured JSON evidence file for planner and predictor consumption.
#
# Type: sense. Read-only — produces metrics only, no git artifacts.
#
# Staleness threshold: 1 day (matches evidence/resources/ schema).
# Cron: daily at 06:00 UTC.
[formula]
id = "run-resources"
name = "Infrastructure Resource Snapshot"
description = "Collect disk, RAM, API usage, Anthropic budget burn, and CI queue depth; write evidence/resources/{date}.json."
type = "sense"
# "sense" → read-only, produces metrics only
# "act" → produces git artifacts (cf. run-evolution, run-red-team)
# ── Cron ───────────────────────────────────────────────────────────────────────
[cron]
schedule = "0 6 * * *" # daily at 06:00 UTC
description = "Matches 1-day staleness threshold — one snapshot per day keeps the record fresh."
# ── Inputs ─────────────────────────────────────────────────────────────────────
[inputs.disk_path]
type = "string"
required = false
default = "/"
description = "Filesystem path to measure disk usage for (passed to df)."
[inputs.anthropic_budget_usd_limit]
type = "number"
required = false
default = 50.0
description = "Configured Anthropic budget ceiling in USD. Used to compute budget_pct in the evidence record."
[inputs.woodpecker_api_url]
type = "string"
required = false
default = "http://localhost:8090"
description = "Base URL of the Woodpecker CI API. Set to empty string to skip CI metrics."
# ── Execution ──────────────────────────────────────────────────────────────────
[execution]
script = "scripts/harb-evaluator/run-resources.sh"
invocation = "DISK_PATH={disk_path} ANTHROPIC_BUDGET_USD_LIMIT={anthropic_budget_usd_limit} WOODPECKER_API_URL={woodpecker_api_url} bash scripts/harb-evaluator/run-resources.sh"
# Exit codes:
# 0 snapshot written successfully
# 2 infrastructure error (disk command unavailable, JSON write failed, etc.)
# ── Steps ──────────────────────────────────────────────────────────────────────
[[steps]]
id = "collect-disk"
description = """
Measure disk usage on {disk_path} via `df -B1 {disk_path}`.
Extract used_bytes, total_bytes, and used_pct.
"""
[[steps]]
id = "collect-ram"
description = """
Measure RAM usage via `free -b` (Linux) or `vm_stat` (macOS).
Extract used_bytes, total_bytes, and used_pct.
"""
[[steps]]
id = "collect-api"
description = """
Collect Anthropic API metrics:
- anthropic_calls_24h: count of API calls in the past 24 hours (read from
tmp/anthropic-call-log.jsonl if present; 0 if absent).
- anthropic_budget_usd_used: sum of cost_usd entries in the call log for
the current calendar day (UTC); 0 if log absent.
- anthropic_budget_usd_limit: from {anthropic_budget_usd_limit} input.
- anthropic_budget_pct: used / limit * 100 (0 if limit = 0).
"""
call_log = "tmp/anthropic-call-log.jsonl"
[[steps]]
id = "collect-ci"
description = """
Query Woodpecker CI API for queue state.
GET {woodpecker_api_url}/api/queue/info:
- woodpecker_queue_depth: length of the waiting queue.
- woodpecker_running: count of currently running jobs.
Skipped gracefully (fields set to null) when {woodpecker_api_url} is empty
or the endpoint is unreachable.
"""
[[steps]]
id = "collect"
description = """
Assemble all collected metrics into evidence/resources/{date}.json.
Compute verdict:
- "critical" if disk_used_pct 95, ram_used_pct 95,
or anthropic_budget_pct 95.
- "warn" if disk_used_pct 80, ram_used_pct 80,
or anthropic_budget_pct 80.
- "ok" otherwise.
Write the file conforming to the schema in evidence/README.md
## Schema: resources/YYYY-MM-DD.json.
"""
output = "evidence/resources/{date}.json"
schema = "evidence/README.md" # see ## Schema: resources/YYYY-MM-DD.json
[[steps]]
id = "deliver"
description = """
Commit evidence/resources/{date}.json to main.
Post a one-line summary comment to the originating issue (if any):
verdict, disk_used_pct, ram_used_pct, anthropic_budget_pct, ci queue depth.
On "warn" or "critical": highlight the breaching dimensions.
"""
# ── Products ───────────────────────────────────────────────────────────────────
[products.evidence_file]
path = "evidence/resources/{date}.json"
delivery = "commit to main"
schema = "evidence/README.md" # see ## Schema: resources/YYYY-MM-DD.json
[products.issue_comment]
delivery = "post to originating issue (if any)"
content = "verdict, disk_used_pct, ram_used_pct, anthropic_budget_pct, ci queue depth"
on_warn = "highlight breaching dimensions and current values"
# ── Resources ──────────────────────────────────────────────────────────────────
[resources]
profile = "light"
compute = "local — shell commands only (df, free, curl); no Docker or Anvil required"
concurrency = "safe to run in parallel with other formulas"
# ── Notes ──────────────────────────────────────────────────────────────────────
[notes]
call_log = """
tmp/anthropic-call-log.jsonl is expected to have one JSON object per line,
each with at minimum:
{ "ts": "<ISO timestamp>", "cost_usd": <number> }
The file is written by the dark-factory agent loop. When absent the API
metrics default to 0 the snapshot is still written rather than failing.
"""
disk_warn = """
Planner MEMORY.md (2026-03-20) notes disk at 79%. The "warn" threshold
(80%) will fire on the first run-resources pass. Monitor trajectory;
evidence pipeline data accumulation will increase disk pressure.
"""

View file

@ -1,109 +0,0 @@
# formulas/run-user-test.toml
#
# Persona-based UX evaluation against the harb stack.
#
# Type: sense — produces UX metrics, changes no code or contracts.
# The formula spins up a full self-contained stack, runs Playwright against
# all 5 personas, collects structured reports, then tears the stack down.
[formula]
id = "run-user-test"
type = "sense"
description = "Persona-based UX evaluation against the harb stack"
depends_on = [973] # evidence directory structure must exist
# ── Stack management ─────────────────────────────────────────────────────────
# The formula is self-contained: it starts and stops its own stack.
[stack]
start_cmd = "./scripts/dev.sh start"
health_cmd = "./scripts/dev.sh health"
stop_cmd = "./scripts/dev.sh stop"
# ── Inputs ───────────────────────────────────────────────────────────────────
# 5 personas across 2 funnels. Each persona has a dedicated Playwright spec
# that simulates the full journey: connect wallet → mint ETH → buy KRK →
# stake → verify position.
[[inputs.funnels]]
name = "passive-holder"
[[inputs.funnels.personas]]
name = "tyler"
display = "Tyler 'Bags' Morrison"
spec = "tests/e2e/usertest/tyler-retail-degen.spec.ts"
[[inputs.funnels.personas]]
name = "alex"
display = "Alex Rivera"
spec = "tests/e2e/usertest/alex-newcomer.spec.ts"
[[inputs.funnels.personas]]
name = "sarah"
display = "Sarah Park"
spec = "tests/e2e/usertest/sarah-yield-farmer.spec.ts"
[[inputs.funnels]]
name = "staker"
[[inputs.funnels.personas]]
name = "priya"
display = "Dr. Priya Malhotra"
spec = "tests/e2e/usertest/priya-institutional.spec.ts"
[[inputs.funnels.personas]]
name = "marcus"
display = "Marcus 'Flash' Chen"
spec = "tests/e2e/usertest/marcus-degen.spec.ts"
# ── Steps ────────────────────────────────────────────────────────────────────
[[steps]]
id = "stack-up"
description = "Spin up full stack (boots Docker services, waits for health)"
run = "./scripts/dev.sh start"
[[steps]]
id = "run-personas"
description = "Run all 5 personas via Playwright (workers=1, sequential to avoid account conflicts)"
run = "./scripts/run-usertest.sh"
after = ["stack-up"]
[[steps]]
id = "collect"
description = "Aggregate per-persona JSON reports from tmp/usertest-results/ into evidence/user-test/{date}.json"
output = "evidence/user-test/{date}.json"
schema = "evidence/README.md#user-test"
after = ["run-personas"]
[[steps]]
id = "stack-down"
description = "Tear down stack"
run = "./scripts/dev.sh stop"
after = ["collect"]
[[steps]]
id = "deliver"
description = "Commit evidence file to main and post summary to issue as comment"
after = ["collect"]
# ── Products ─────────────────────────────────────────────────────────────────
# Three outputs following the standard evidence delivery pattern:
# 1. evidence file → committed to main
# 2. screenshots → referenced inside the evidence file
# 3. summary → posted as issue comment
[[products]]
type = "evidence"
path = "evidence/user-test/{date}.json"
schema = "evidence/README.md#user-test"
destination = "commit"
[[products]]
type = "screenshots"
path = "test-results/usertest/"
destination = "evidence-ref" # paths recorded in the evidence file, not committed separately
[[products]]
type = "summary"
destination = "issue-comment"

View file

View file

@ -1 +0,0 @@
[]

View file

@ -1,2 +1,2 @@
dist
coverage
node_modules

View file

@ -1,4 +1,3 @@
<!-- last-reviewed: baa501fa46355f7b04bffdf386d397ad19f69298 -->
# Kraiken Library - Agent Guide
Shared TypeScript helpers used by the landing app, txnBot, and other services to talk to KRAIKEN contracts and the Ponder GraphQL API.
@ -9,10 +8,11 @@ Shared TypeScript helpers used by the landing app, txnBot, and other services to
- Centralise staking math (tax calculations, snatch selection, share conversions) for reuse across clients.
## Key Modules
- `src/staking.ts` - Harberger staking helpers for delinquency checks and snatch math.
- `src/snatch.ts` - Snatch selection engine and supporting types.
- `src/ids.ts` - Position ID encoding helpers.
- `src/subgraph.ts` - Byte utilities shared between the GraphQL layer and clients.
- `src/kraiken.ts` - Token-facing helpers and supply utilities.
- `src/stake.ts` - Staking math, Harberger tax helpers, snatch scoring.
- `src/chains.ts` - Chain constants and deployment metadata.
- `src/queries/` - GraphQL operations that target the Ponder schema.
- `src/__generated__/graphql.ts` - Codegen output consumed throughout the stack.
- `src/abis.ts` - Contract ABIs imported directly from `onchain/out/` forge artifacts. Single source of truth for all ABI consumers.
- `src/taxRates.ts` - Generated from `onchain/src/Stake.sol` by `scripts/sync-tax-rates.mjs`; never edit by hand.
- `src/version.ts` - Version validation system tracking `KRAIKEN_LIB_VERSION` and `COMPATIBLE_CONTRACT_VERSIONS` for runtime dependency checking.
@ -29,25 +29,20 @@ Shared TypeScript helpers used by the landing app, txnBot, and other services to
- `npm test` - Execute Jest suite for helper coverage.
## Integration Notes
- Landing app consumes `kraiken-lib/abis`, `kraiken-lib/staking`, and `kraiken-lib/subgraph` for ABI resolution and ID conversion.
- txnBot relies on `kraiken-lib/staking` and `kraiken-lib/ids` to evaluate profitability and tax windows.
- Ponder imports `kraiken-lib/abis` for indexing, and `kraiken-lib/version` for cross-service version checks.
- Landing app consumes helpers for UI projections and staking copy.
- txnBot relies on the same helpers to evaluate profitability and tax windows.
- When the Ponder schema changes, rerun `npm run compile` and commit regenerated types to prevent drift.
## Import Guidance
- The legacy `helpers.ts` barrel has been removed. Always import from the narrow subpaths (e.g. `kraiken-lib/abis`, `kraiken-lib/staking`, `kraiken-lib/snatch`, `kraiken-lib/subgraph`).
- Avoid importing `kraiken-lib` directly; the root module no longer re-exports the helper surface and exists only to raise build-time errors for bundle imports.
## ES Module Architecture
- **Module Type**: This package is built as ES modules (`"type": "module"` in package.json). All consumers must support ES modules.
- **Import Extensions**: All relative imports in TypeScript source files MUST include `.js` extensions (e.g., `from "./staking.js"`). This is required for ES module resolution even though the source files are `.ts`.
- **Import Extensions**: All relative imports in TypeScript source files MUST include `.js` extensions (e.g., `from "./helpers.js"`). This is required for ES module resolution even though the source files are `.ts`.
- **JSON Imports**: JSON files (like ABI artifacts) must use import assertions: `import Foo from './path.json' assert { type: 'json' }`.
- **TypeScript Config**: `tsconfig.json` must specify:
- `"module": "esnext"` - Generate ES module syntax
- `"moduleResolution": "node"` - Enable proper module resolution
- `"rootDir": "./src"` - Ensure flat output structure in `dist/`
- **Build Output**: Running `npx tsc` produces ES module `.js` files in `dist/` that can be consumed by both browser (Vite) and Node.js (≥14 with `"type": "module"`).
- **Container Mount**: Docker services bind-mount `dist/` read-only from the host. Run `./scripts/build-kraiken-lib.sh` before `docker-compose up` or keep `scripts/watch-kraiken-lib.sh` running to rebuild automatically.
- **Container Mount**: Podman/Docker services now bind-mount `dist/` read-only from the host. Run `./scripts/build-kraiken-lib.sh` before `podman-compose up` or keep `scripts/watch-kraiken-lib.sh` running to rebuild automatically.
## Quality Guidelines
- Keep helpers pure and side-effect free; they should accept explicit dependencies.

View file

@ -16,7 +16,7 @@ yarn add kraiken-lib
then
```
import { bytesToUint256LittleEndian, uint256ToBytesLittleEndian } from "kraiken-lib/subgraph";
import { bytesToUint256LittleEndian, uint256ToBytesLittleEndian } from "kraiken-lib";
uint256ToBytesLittleEndian(3n);
```
@ -24,7 +24,7 @@ uint256ToBytesLittleEndian(3n);
## get Snatch List
```
import { getSnatchList } from "kraiken-lib/snatch";
import { getSnatchList } from "kraiken-lib";
const positionIds = getSnatchList(positions, neededShares, maxTaxRateDecimal, stakeTotalSupply);
```

View file

@ -0,0 +1,5 @@
module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
maxWorkers: 1
};

File diff suppressed because it is too large Load diff

View file

@ -1,8 +1,7 @@
{
"name": "kraiken-lib",
"version": "1.0.0",
"version": "0.2.0",
"description": "helper functions and snatch selection",
"packageManager": "npm@11.6.1",
"type": "module",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@ -12,6 +11,11 @@
"require": "./dist/index.js",
"import": "./dist/index.js"
},
"./helpers": {
"types": "./dist/helpers.d.ts",
"require": "./dist/helpers.js",
"import": "./dist/helpers.js"
},
"./ids": {
"types": "./dist/ids.d.ts",
"require": "./dist/ids.js",
@ -46,24 +50,13 @@
"types": "./dist/version.d.ts",
"require": "./dist/version.js",
"import": "./dist/version.js"
},
"./position": {
"types": "./dist/position.d.ts",
"require": "./dist/position.js",
"import": "./dist/position.js"
},
"./format": {
"types": "./dist/format.d.ts",
"require": "./dist/format.js",
"import": "./dist/format.js"
}
},
"files": [
"/dist"
],
"scripts": {
"test": "vitest run",
"test:coverage": "vitest run --coverage",
"test": "jest",
"compile": "graphql-codegen",
"watch": "graphql-codegen -w",
"lint": "eslint 'src/**/*.ts'",
@ -81,8 +74,7 @@
"dependencies": {
"@apollo/client": "^3.9.10",
"graphql": "^16.8.1",
"graphql-tag": "^2.12.6",
"viem": "^2.22.13"
"graphql-tag": "^2.12.6"
},
"devDependencies": {
"@graphql-codegen/cli": "^5.0.2",
@ -90,16 +82,16 @@
"@graphql-codegen/typescript": "^4.0.6",
"@graphql-codegen/typescript-operations": "^4.2.0",
"@graphql-typed-document-node/core": "^3.2.0",
"@types/jest": "^29.5.12",
"@types/node": "^24.6.0",
"@typescript-eslint/eslint-plugin": "^8.45.0",
"@typescript-eslint/parser": "^8.45.0",
"@vitest/coverage-v8": "^3.0.0",
"eslint": "^9.36.0",
"husky": "^9.1.7",
"jest": "^29.7.0",
"lint-staged": "^16.2.3",
"picomatch": "^4.0.3",
"prettier": "^3.6.2",
"typescript": "^5.4.3",
"vitest": "^3.0.0"
"ts-jest": "^29.1.2",
"typescript": "^5.4.3"
}
}

View file

@ -18,28 +18,8 @@ export const KRAIKEN_ABI = KraikenForgeOutput.abi;
*/
export const STAKE_ABI = StakeForgeOutput.abi;
/**
* LiquidityManager events-only ABI
* Tracks recenters, ETH reserve, and VWAP data
*/
// eslint-disable-next-line @typescript-eslint/naming-convention
export const LiquidityManagerAbi = [
{"type":"event","name":"EthAbundance","inputs":[{"name":"currentTick","type":"int24","indexed":false},{"name":"ethBalance","type":"uint256","indexed":false},{"name":"outstandingSupply","type":"uint256","indexed":false},{"name":"vwap","type":"uint256","indexed":false},{"name":"vwapTick","type":"int24","indexed":false}],"anonymous":false},
{"type":"event","name":"EthScarcity","inputs":[{"name":"currentTick","type":"int24","indexed":false},{"name":"ethBalance","type":"uint256","indexed":false},{"name":"outstandingSupply","type":"uint256","indexed":false},{"name":"vwap","type":"uint256","indexed":false},{"name":"vwapTick","type":"int24","indexed":false}],"anonymous":false},
{"type":"event","name":"Recentered","inputs":[{"name":"currentTick","type":"int24","indexed":true},{"name":"isUp","type":"bool","indexed":true}],"anonymous":false}
] as const;
export const LM_ABI = LiquidityManagerAbi;
// Re-export for convenience
export const ABIS = {
Kraiken: KRAIKEN_ABI,
Stake: STAKE_ABI,
LiquidityManager: LM_ABI,
} as const;
// Backward-compatible aliases
// eslint-disable-next-line @typescript-eslint/naming-convention
export const KraikenAbi = KRAIKEN_ABI;
// eslint-disable-next-line @typescript-eslint/naming-convention
export const StakeAbi = STAKE_ABI;

View file

@ -1,33 +0,0 @@
import { formatUnits } from 'viem';
/** Convert wei (bigint or string) to a JS number. Core primitive. */
export function weiToNumber(value: bigint | string, decimals = 18): number {
const bi = typeof value === 'string' ? BigInt(value || '0') : (value ?? 0n);
return Number(formatUnits(bi, decimals));
}
/** Format wei to fixed decimal string (e.g. "0.00123") */
export function formatWei(value: bigint | string, decimals = 18, digits = 5): string {
const num = weiToNumber(value, decimals);
return num === 0 ? '0' : num.toFixed(digits);
}
/** Format number to compact display (e.g. "1.23K", "4.56M") */
export function compactNumber(value: number): string {
return Intl.NumberFormat('en-US', {
notation: 'compact',
maximumFractionDigits: 2,
}).format(value);
}
/** Format number with commas (e.g. "1,234,567") */
export function commaNumber(value: number): string {
if (!Number.isFinite(value)) return '0';
return value.toLocaleString('en-US');
}
/** Format a token amount with comma grouping and 2 decimal places (e.g. "1,234.56") */
export function formatTokenAmount(value: number): string {
if (!isFinite(value)) return '0.00';
return value.toLocaleString('en-US', { minimumFractionDigits: 2, maximumFractionDigits: 2 });
}

View file

@ -0,0 +1,4 @@
export * from './staking.js';
export * from './snatch.js';
export * from './ids.js';
export * from './taxRates.js';

View file

@ -1,5 +1,32 @@
/**
* kraiken-lib no longer exposes a bundled helper surface.
* Import from explicit subpaths such as `kraiken-lib/staking`.
*/
export {};
export { bytesToUint256LittleEndian, uint256ToBytesLittleEndian } from './subgraph.js';
// Backward compatible aliases
export { bytesToUint256LittleEndian as bytesToUint256, uint256ToBytesLittleEndian as uint256ToBytes } from './subgraph.js';
export { TAX_RATE_OPTIONS, type TaxRateOption } from './taxRates.js';
export { calculateSnatchShortfall, isPositionDelinquent } from './staking.js';
export {
minimumTaxRate,
selectSnatchPositions,
getSnatchList,
type SnatchablePosition,
type SnatchSelectionOptions,
type SnatchSelectionResult,
} from './snatch.js';
export { decodePositionId } from './ids.js';
export { KRAIKEN_ABI, STAKE_ABI, ABIS } from './abis.js';
// Backward compatible aliases
export { KRAIKEN_ABI as KraikenAbi, STAKE_ABI as StakeAbi } from './abis.js';
export {
KRAIKEN_LIB_VERSION,
COMPATIBLE_CONTRACT_VERSIONS,
STACK_META_ID,
isCompatibleVersion,
getVersionMismatchError,
} from './version.js';

Some files were not shown because too many files have changed in this diff Show more