fix: address review — migration comment, link ring buffer constants (#170)

This commit is contained in:
openhands 2026-02-22 17:57:39 +00:00
parent 3fceb4145a
commit 0fb1ed4bf8
10 changed files with 897 additions and 135 deletions

1
.gitignore vendored
View file

@ -37,3 +37,4 @@ services/ponder/.ponder/
# Temporary files # Temporary files
/tmp/ /tmp/
logs/

37
.woodpecker/review.yml Normal file
View file

@ -0,0 +1,37 @@
# PR Review Bot
# Triggers on PR open/sync, runs Claude Code review with rich context,
# posts structured review comment to Codeberg.
#
# NOTE: This runs on the host (not in Docker) because it needs:
# - claude CLI
# - git worktrees in the harb repo
# - ~/.netrc for Codeberg API
#
# For now, trigger manually or via webhook → script.
# TODO: Move to Woodpecker "local" backend when ready.
when:
event: pull_request
# Only review PRs targeting master
branch: master
steps:
review:
image: alpine
commands:
# This is a placeholder — actual review runs on host via script.
# Woodpecker's Docker backend can't access claude CLI.
#
# Two options for triggering:
#
# Option A: Codeberg webhook → OpenClaw cron/system event → review script
# - Codeberg sends PR webhook to OpenClaw gateway
# - Gateway triggers: scripts/review-pr.sh $PR_NUMBER
#
# Option B: Woodpecker local backend
# - Run woodpecker-agent with local backend (no Docker)
# - Pipeline runs directly on host
# - scripts/review-pr.sh ${CI_PULL_REQUEST_NUMBER}
#
- echo "PR review should be triggered via host script"
- echo "Run: scripts/review-pr.sh ${CI_PULL_REQUEST_NUMBER:-unknown}"

120
docs/ARCHITECTURE.md Normal file
View file

@ -0,0 +1,120 @@
# ARCHITECTURE.md — System Map
Compressed overview for AI agents. Read this first, drill into source for details.
## Contract Architecture
```
Kraiken.sol (ERC-20 token)
├── liquidityManager: address (set once, immutable after)
│ └── LiquidityManager.sol (ThreePositionStrategy)
│ ├── optimizer: Optimizer (private immutable ref)
│ ├── pool: IUniswapV3Pool
│ ├── kraiken: Kraiken
│ └── Positions: Floor, Anchor, Discovery
├── stakingPool: address
│ └── Stake.sol
│ ├── Staking positions with tax rates
│ ├── Snatch mechanics (competitive staking)
│ └── getPercentageStaked(), getAverageTaxRate()
└── feeDestination: address (protocol revenue)
Optimizer.sol (UUPS Upgradeable Proxy)
├── Reads: stake.getPercentageStaked(), stake.getAverageTaxRate()
├── Computes: sentiment → 4 liquidity params
├── Versions: Optimizer, OptimizerV2, OptimizerV3, OptimizerV3Push3
└── Admin: single address, set at initialize()
```
## Key Relationships
- **Kraiken → LiquidityManager**: set once via `setLiquidityManager()`, reverts if already set
- **LiquidityManager → Optimizer**: `private immutable` — baked into constructor, never changes
- **LiquidityManager → Kraiken**: exclusive minting/burning rights
- **Optimizer → Stake**: reads sentiment data (% staked, avg tax rate)
- **Optimizer upgrades**: UUPS proxy, admin-only `_authorizeUpgrade()`
## Three-Position Strategy
All managed by LiquidityManager via ThreePositionStrategy abstract:
| Position | Purpose | Behavior |
|----------|---------|----------|
| **Floor** | Safety net | Deep liquidity at VWAP-adjusted prices |
| **Anchor** | Price discovery | Near current price, 1-100% width |
| **Discovery** | Fee capture | Borders anchor, ~3x price range (11000 tick spacing) |
**Recenter** = atomic repositioning of all three positions. Triggered by anyone, automated by txnBot.
## Optimizer Parameters
`getLiquidityParams()` returns 4 values:
1. `capitalInefficiency` (0 to 1e18) — capital buffer level
2. `anchorShare` (0 to 1e18) — % allocated to anchor position
3. `anchorWidth` (ticks) — width of anchor position
4. `discoveryDepth` (0 to 1e18) — depth of discovery position
Sentiment calculation: `sentiment = f(averageTaxRate, percentageStaked)`
- High sentiment (bull) → wider discovery, aggressive fees
- Low sentiment (bear) → tight around floor, maximum protection
## Stack
### On-chain
- Solidity, Foundry toolchain
- Uniswap V3 for liquidity positions
- OpenZeppelin for UUPS proxy, Initializable
- Base L2 (deployment target)
### Indexer
- **Ponder** (`services/ponder/`) — indexes on-chain events
- Schema: `services/ponder/ponder.schema.ts`
- Stats table with 168-slot ring buffer (7d × 24h × 4 segments)
- Ring buffer segments: [ethReserve, minted, burned, tax] (slot 3 being changed to holderCount)
- GraphQL API at port 42069
### Landing Page
- Vue 3 + Vite (`landing/`)
- Three variants: HomeView (default), HomeViewOffensive (degens), HomeViewMixed
- Docs section: HowItWorks, Tokenomics, Staking, LiquidityManagement, AIAgent, FAQ
- LiveStats component polls Ponder GraphQL every 30s
### Staking Web App
- Vue 3 (`web-app/`)
- Password-protected (multiple passwords in LoginView.vue)
- ProtocolStatsCard shows real-time protocol metrics
### Infrastructure
- Docker Compose on 8GB VPS
- Woodpecker CI at ci.niovi.voyage
- Codeberg repo: johba/harb (private)
- Container registry: registry.niovi.voyage
## Directory Map
```
harb/
├── onchain/ # Solidity contracts + Foundry
│ ├── src/ # Contract source
│ ├── test/ # Forge tests
│ └── foundry.toml # via_ir = true required
├── services/
│ └── ponder/ # Indexer service
│ ├── ponder.schema.ts
│ ├── src/
│ │ ├── helpers/stats.ts # Ring buffer logic
│ │ ├── lm.ts # LiquidityManager indexing
│ │ └── stake.ts # Stake indexing
├── landing/ # Landing page (Vue 3)
│ ├── src/
│ │ ├── components/ # LiveStats, KFooter, WalletCard, etc.
│ │ ├── views/ # HomeView variants, docs pages
│ │ └── router/
├── web-app/ # Staking app (Vue 3)
│ ├── src/
│ │ ├── components/ # ProtocolStatsCard, etc.
│ │ └── views/ # LoginView, StakeView, etc.
├── containers/ # Docker configs, entrypoints
├── docs/ # This file, PRODUCT-TRUTH.md
└── .woodpecker/ # CI pipeline configs
```

99
docs/ENVIRONMENT.md Normal file
View file

@ -0,0 +1,99 @@
# ENVIRONMENT.md — Local Dev Stack
How to start, stop, and verify the harb development environment.
## Stack Overview
Docker Compose services (in startup order):
| Service | Purpose | Port | Health Check |
|---------|---------|------|-------------|
| **anvil** | Local Ethereum fork (Base Sepolia) | 8545 | JSON-RPC response |
| **postgres** | Ponder database | 5432 | pg_isready |
| **bootstrap** | Deploys contracts to anvil | — | One-shot, exits 0 |
| **ponder** | On-chain indexer + GraphQL API | 42069 | HTTP /ready or GraphQL |
| **landing** | Landing page (Vue 3 + Vite) | 5174 | HTTP response |
| **webapp** | Staking app (Vue 3) | 5173 | HTTP response |
| **txn-bot** | Automated recenter/tx bot | — | Process alive |
| **caddy** | Reverse proxy / TLS | 80/443 | — |
| **otterscan** | Block explorer | 5100 | — |
## Quick Start
```bash
cd /home/debian/harb
# Start everything
docker compose up -d
# Wait for bootstrap (deploys contracts, ~60-90s)
docker compose logs -f bootstrap
# Check all healthy
docker compose ps
```
## Verify Stack Health
```bash
# Anvil (local chain)
curl -s http://localhost:8545 -X POST -H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' | jq .result
# Ponder (indexer + GraphQL)
curl -s http://localhost:42069/graphql -X POST \
-H 'Content-Type: application/json' \
-d '{"query":"{ stats { id } }"}' | jq .
# Landing page
curl -sf http://localhost:5174 | head -5
# Staking app
curl -sf http://localhost:5173 | head -5
```
## Container Network
Services communicate on `harb-network` Docker bridge.
Internal hostnames match service names (e.g., `ponder:42069`).
Landing page container IP (for Playwright testing): check with
```bash
docker inspect landing --format '{{.NetworkSettings.Networks.harb_harb-network.IPAddress}}'
```
## Common URLs (for testing/review)
- **Landing:** `http://localhost:5174`, or the container IP (e.g. `http://172.18.0.6:5174` — the IP can change between restarts; resolve the current one with the `docker inspect` command above)
- **Staking app:** `http://localhost:5173/app/`
- **Ponder GraphQL:** `http://localhost:42069/graphql`
- **Anvil RPC:** `http://localhost:8545`
## Resource Notes
- 8GB VPS — running full stack uses ~4-5GB RAM
- npm install inside containers can OOM with all services running
- Landing container takes ~2min to restart (npm install + vite startup)
- 4GB swap is essential for CI + stack concurrency
## Staking App Passwords
For testing login: `lobsterDao`, `test123`, `lobster-x010syqe?412!`
(defined in `web-app/src/views/LoginView.vue`)
## Contract Addresses
After bootstrap, addresses are in `/home/debian/harb/tmp/containers/contracts.env`.
Landing sources this file on startup for `VITE_KRAIKEN_ADDRESS` and `VITE_STAKE_ADDRESS`.
## Playwright Testing
```bash
# Chromium path
/home/debian/.cache/ms-playwright/chromium-1209/chrome-linux64/chrome
# Run against landing (block fonts for speed)
NODE_PATH=$(npm root -g) node test-script.cjs
```
See `tmp/user-test-r4.cjs` for the most recent test script pattern (local only — `/tmp/` is gitignored, so this file is not in the repo).

109
docs/PRODUCT-TRUTH.md Normal file
View file

@ -0,0 +1,109 @@
# PRODUCT-TRUTH.md — What We Can and Cannot Claim
This file is the source of truth for all product messaging, docs, and marketing.
If a claim isn't here or contradicts what's here, it's wrong. Update this file
when the protocol changes — not the marketing copy.
**Last updated:** 2026-02-22
**Updated by:** Johann + Clawy after user test review session
---
## Target Audience
- **Crypto natives** who know DeFi but don't know KrAIken
- NOT beginners. NOT "new to DeFi" users.
- Think: people who've used Uniswap, understand liquidity, know what a floor price means
## The Floor
✅ **Can say:**
- Every KRK token has a minimum redemption price backed by real ETH
- The floor is enforced by immutable smart contracts
- The floor is backed by actual ETH reserves, not promises
- No rug pulls — liquidity is locked in contracts
- "Programmatic guarantee" (borrowed from Baseline — accurate for us too)
❌ **Cannot say:**
- "The floor can never decrease" — **FALSE.** Selling withdraws ETH from reserves. The floor CAN decrease.
- "Guaranteed profit" or "risk-free" — staking is leveraged exposure, it has real downside
- "Floor always goes up" — only true if fee income exceeds sell pressure, which isn't guaranteed
## The Optimizer
✅ **Can say:**
- Reads staker sentiment (% staked, average tax rate) to calculate parameters
- Returns 4 parameters: capitalInefficiency, anchorShare, anchorWidth, discoveryDepth
- Runs autonomously on-chain — no human triggers needed for parameter reads
- Is a UUPS upgradeable proxy — can be upgraded to new versions
- Currently admin-upgradeable (single admin key set at initialization)
- Multiple versions exist: Optimizer, OptimizerV2, OptimizerV3, OptimizerV3Push3
- "The optimizer evolves" — true in the sense that new versions get deployed
❌ **Cannot say:**
- "No admin keys" — **FALSE.** UUPS upgrade requires admin. Admin key exists.
- "No proxy patterns" — **FALSE.** It IS a UUPS proxy.
- "Stakers vote for new optimizers" — **NOT YET.** This is roadmap, not current state.
- "Simply evolves" / "evolves without upgrades" — misleading. It's an explicit upgrade via proxy.
- "Three strategies" — **FALSE.** It's ONE strategy with THREE positions (Floor, Anchor, Discovery).
- "AI learns from the market" — overstated. The optimizer reads staking sentiment, not market data directly.
🔮 **Roadmap (can say "planned" / "coming"):**
- Staker governance for optimizer upgrades (vote with stake weight)
- On-chain training data → new optimizer contracts via Push3 transpiler
- Remove admin key in favor of staker voting
## Liquidity Positions
✅ **Can say:**
- Three positions: Floor, Anchor, Discovery
- Floor: deep liquidity at VWAP-adjusted prices (safety net)
- Anchor: near current price, fast price discovery (1-100% width)
- Discovery: borders anchor, captures fees (wide range, ~3x current price)
- The optimizer adjusts position parameters based on sentiment
- "Recenter" = atomic repositioning of all liquidity in one transaction
- Anyone can trigger a recenter; the protocol bot does it automatically
- Bull mode: wider discovery, aggressive fee capture. Bear mode: tight around floor.
❌ **Cannot say:**
- "Three trading strategies" — it's three positions in ONE strategy
- "Token-owned liquidity" — ⚠️ USE CAREFULLY. KRK doesn't "own" anything in the legal/contract sense. The LiquidityManager manages positions. Acceptable as metaphor in marketing, not in technical docs.
## Staking
✅ **Can say:**
- Staking = leveraged directional exposure
- Stakers set tax rates; positions can be "snatched" by others willing to pay higher tax
- Tax rates influence optimizer sentiment → bull/bear positioning
- "Stakers profit when the community grows" (via supply expansion + leverage)
- Staking is optional — most holders just hold
❌ **Cannot say:**
- "Start Earning" / "Earn yield" / "APY" — staking is NOT yield farming
- "Guaranteed returns" — leveraged positions amplify losses too
- "Passive income" — tax payments are a cost, not income
## Supply Mechanics
✅ **Can say:**
- Elastic supply: buy = mint, sell = burn
- Protocol controls minting exclusively through LiquidityManager
- LiquidityManager address is set once on Kraiken contract and cannot be changed
## Code / Open Source
✅ **Can say:**
- Smart contracts are verifiable on Basescan
- Key contracts are viewable on the docs/code page
- "Full source will be published at mainnet launch" (if that's the plan)
❌ **Cannot say:**
- "Open source" — the Codeberg repo is **private**. This is currently false.
- "Audited" — unless an audit has been completed
## General Rules
1. When in doubt, understate. "The floor is backed by ETH" > "The floor guarantees you'll never lose money"
2. Separate current state from roadmap. Always.
3. Technical docs: be precise. Marketing: metaphors OK but never contradict technical reality.
4. If you're not sure a claim is true, check this file. If it's not here, verify against contract source before writing it.

View file

@ -54,6 +54,7 @@
<script setup lang="ts"> <script setup lang="ts">
import { ref, computed, onMounted, onUnmounted } from 'vue'; import { ref, computed, onMounted, onUnmounted } from 'vue';
// Must match RING_BUFFER_SEGMENTS and HOURS_IN_RING_BUFFER in services/ponder/src/helpers/stats.ts
const RING_SEGMENTS = 4; // ethReserve, minted, burned, holderCount const RING_SEGMENTS = 4; // ethReserve, minted, burned, holderCount
const RING_HOURS = 168; // 7 days * 24 hours const RING_HOURS = 168; // 7 days * 24 hours

106
scripts/review-poll.sh Executable file
View file

@ -0,0 +1,106 @@
#!/usr/bin/env bash
# review-poll.sh — Poll open PRs and review those with green CI
#
# Usage: ./scripts/review-poll.sh
#
# Runs from system cron. Checks all open PRs targeting master.
# Reviews unreviewed ones sequentially via review-pr.sh.
#
# Peek while running: cat /tmp/harb-review-status
# Full log: tail -f /home/debian/harb/logs/review.log
set -euo pipefail
# --- Environment (cron-safe) ---
export PATH="/home/debian/.nvm/versions/node/v22.20.0/bin:/usr/local/bin:/usr/bin:/bin:$PATH"
export HOME="${HOME:-/home/debian}"
# --- Config ---
REPO="johba/harb"
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# NOTE(review): assumes the codeberg.org .netrc entry has login and password
# on the two lines after the machine line — verify if auth starts failing.
CODEBERG_TOKEN="$(awk '/codeberg.org/{getline;getline;print $2}' ~/.netrc)"
API_BASE="https://codeberg.org/api/v1/repos/${REPO}"
LOGDIR="/home/debian/harb/logs"
LOGFILE="$LOGDIR/review.log"
STATUSFILE="/tmp/harb-review-status"   # written by review-pr.sh; listed here for reference
MAX_REVIEWS=3
mkdir -p "$LOGDIR"
# Log to both the logfile and stdout (stdout lands in cron mail / journal).
log() {
  local ts
  ts="$(date -u '+%Y-%m-%d %H:%M:%S UTC')"
  echo "[$ts] $*" >> "$LOGFILE"
  echo "[$ts] $*"
}
# --- Log rotation (rotate once the log exceeds 50KB; keep one archive) ---
# mv overwrites any previous .1 archive, so exactly one rotated file exists.
if [ -f "$LOGFILE" ] && [ "$(stat -c%s "$LOGFILE" 2>/dev/null || echo 0)" -gt 51200 ]; then
  mv "$LOGFILE" "$LOGFILE.1"
  log "Log rotated"
fi
log "--- Poll start ---"
# --- Fetch open PRs targeting master ---
# "|| true": under set -e/pipefail a transient API failure would otherwise
# kill the whole poll; degrade to "no open PRs" and try again next cron run.
PRS=$(curl -sf -H "Authorization: token ${CODEBERG_TOKEN}" \
  "${API_BASE}/pulls?state=open&limit=20" | \
  jq -r '.[] | select(.base.ref == "master") | "\(.number) \(.head.sha)"' || true)
if [ -z "$PRS" ]; then
  log "No open PRs targeting master"
  exit 0
fi
TOTAL=$(echo "$PRS" | wc -l)
log "Found ${TOTAL} open PRs"
REVIEWED=0
SKIPPED=0
while IFS= read -r line; do
  PR_NUM=$(echo "$line" | awk '{print $1}')
  PR_SHA=$(echo "$line" | awk '{print $2}')
  # Quick pre-check: CI status (avoid calling review-pr.sh just to skip).
  # On API failure, treat as "unknown" and skip this PR instead of crashing.
  CI_STATE=$(curl -sf -H "Authorization: token ${CODEBERG_TOKEN}" \
    "${API_BASE}/commits/${PR_SHA}/status" | jq -r '.state // "unknown"' || echo "unknown")
  if [ "$CI_STATE" != "success" ]; then
    log "  #${PR_NUM} CI=${CI_STATE}, skip"
    SKIPPED=$((SKIPPED + 1))
    continue
  fi
  # Check for an existing review comment at this exact SHA (marker left by
  # review-pr.sh). On API failure default to 0 — review-pr.sh re-checks anyway.
  HAS_REVIEW=$(curl -sf -H "Authorization: token ${CODEBERG_TOKEN}" \
    "${API_BASE}/issues/${PR_NUM}/comments?limit=50" | \
    jq -r --arg sha "$PR_SHA" \
      '[.[] | select(.body | contains("<!-- reviewed:")) | select(.body | contains($sha))] | length' || echo 0)
  if [ "$HAS_REVIEW" -gt "0" ]; then
    log "  #${PR_NUM} already reviewed at ${PR_SHA:0:7}, skip"
    SKIPPED=$((SKIPPED + 1))
    continue
  fi
  log "  #${PR_NUM} needs review (CI=success, SHA=${PR_SHA:0:7})"
  if "${SCRIPT_DIR}/review-pr.sh" "$PR_NUM" 2>&1; then
    REVIEWED=$((REVIEWED + 1))
  else
    log "  #${PR_NUM} review failed"
  fi
  # Cap reviews per poll so a backlog can't monopolize the box.
  if [ "$REVIEWED" -ge "$MAX_REVIEWS" ]; then
    log "Hit max reviews (${MAX_REVIEWS}), stopping"
    break
  fi
  sleep 2
done <<< "$PRS"
log "--- Poll done: ${REVIEWED} reviewed, ${SKIPPED} skipped ---"

270
scripts/review-pr.sh Executable file
View file

@ -0,0 +1,270 @@
#!/usr/bin/env bash
# review-pr.sh — AI-powered PR review using claude CLI
#
# Usage: ./scripts/review-pr.sh <pr-number> [--force]
#
# Calls `claude -p --model sonnet` with context docs + diff.
# No tool access (pure text review), ~$0.02-0.05 per review.
#
# --force: skip the "already reviewed" check
#
# Concurrency: uses a lockfile to ensure only one review runs at a time.
# Status: writes live progress to /tmp/harb-review-status for peeking.
# Logs: /home/debian/harb/logs/review.log (auto-rotated at 100KB)
#
# Peek while running: cat /tmp/harb-review-status
# Watch log: tail -f ~/harb/logs/review.log
set -euo pipefail
# --- Environment (cron-safe) ---
export PATH="/home/debian/.nvm/versions/node/v22.20.0/bin:/usr/local/bin:/usr/bin:/bin:$PATH"
export HOME="${HOME:-/home/debian}"
# --- Config ---
PR_NUMBER="${1:?Usage: review-pr.sh <pr-number> [--force]}"
FORCE="${2:-}"
REPO="johba/harb"
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
# NOTE(review): assumes the codeberg.org .netrc entry has login and password
# on the two lines after the machine line — verify if auth starts failing.
CODEBERG_TOKEN="$(awk '/codeberg.org/{getline;getline;print $2}' ~/.netrc)"
API_BASE="https://codeberg.org/api/v1/repos/${REPO}"
LOCKFILE="/tmp/harb-review.lock"
STATUSFILE="/tmp/harb-review-status"
LOGDIR="/home/debian/harb/logs"
LOGFILE="$LOGDIR/review.log"
MIN_MEM_MB=1500
TMPDIR=$(mktemp -d)
mkdir -p "$LOGDIR"
# --- Logging ---
log() {
  local ts
  ts="$(date -u '+%Y-%m-%d %H:%M:%S UTC')"
  echo "[$ts] PR#${PR_NUMBER} $*" | tee -a "$LOGFILE"
}
status() {
  local ts
  ts="$(date -u '+%Y-%m-%d %H:%M:%S UTC')"
  printf '[%s] PR #%s: %s\n' "$ts" "$PR_NUMBER" "$*" > "$STATUSFILE"
  log "$*"
}
# Only remove the lock/status files if THIS process created them.
# Without the guard, exiting on "another review running" would delete the
# other process's lockfile and let a third invocation race in.
LOCK_ACQUIRED=0
cleanup() {
  rm -rf "$TMPDIR"
  if [ "$LOCK_ACQUIRED" = "1" ]; then
    rm -f "$LOCKFILE" "$STATUSFILE"
  fi
}
trap cleanup EXIT
# --- Log rotation (keep ~100KB + 1 archive) ---
if [ -f "$LOGFILE" ]; then
  LOGSIZE=$(stat -c%s "$LOGFILE" 2>/dev/null || echo 0)
  if [ "$LOGSIZE" -gt 102400 ]; then
    mv "$LOGFILE" "$LOGFILE.old"
    log "Log rotated (was ${LOGSIZE} bytes)"
  fi
fi
# --- Memory guard (claude + node can OOM the 8GB VPS under full stack) ---
AVAIL_MB=$(awk '/MemAvailable/{printf "%d", $2/1024}' /proc/meminfo)
if [ "$AVAIL_MB" -lt "$MIN_MEM_MB" ]; then
  log "SKIP: only ${AVAIL_MB}MB available (need ${MIN_MEM_MB}MB)"
  exit 0
fi
# --- Concurrency lock (PID-based; stale locks from dead processes are reaped) ---
if [ -f "$LOCKFILE" ]; then
  LOCK_PID=$(cat "$LOCKFILE" 2>/dev/null || echo "")
  if [ -n "$LOCK_PID" ] && kill -0 "$LOCK_PID" 2>/dev/null; then
    log "SKIP: another review running (PID ${LOCK_PID})"
    exit 0
  fi
  log "Removing stale lock (PID ${LOCK_PID:-?})"
  rm -f "$LOCKFILE"
fi
echo $$ > "$LOCKFILE"
LOCK_ACQUIRED=1
# --- Fetch PR metadata ---
status "fetching metadata"
PR_JSON=$(curl -sf -H "Authorization: token ${CODEBERG_TOKEN}" \
  "${API_BASE}/pulls/${PR_NUMBER}")
PR_TITLE=$(echo "$PR_JSON" | jq -r '.title')
PR_BODY=$(echo "$PR_JSON" | jq -r '.body // ""')
PR_HEAD=$(echo "$PR_JSON" | jq -r '.head.ref')
PR_BASE=$(echo "$PR_JSON" | jq -r '.base.ref')
PR_SHA=$(echo "$PR_JSON" | jq -r '.head.sha')
PR_STATE=$(echo "$PR_JSON" | jq -r '.state')
log "${PR_TITLE} (${PR_HEAD}${PR_BASE} ${PR_SHA:0:7})"
# --- Guards ---
if [ "$PR_STATE" != "open" ]; then
  log "SKIP: state=${PR_STATE}"
  exit 0
fi
status "checking CI"
CI_STATE=$(curl -sf -H "Authorization: token ${CODEBERG_TOKEN}" \
  "${API_BASE}/commits/${PR_SHA}/status" | jq -r '.state // "unknown"')
if [ "$CI_STATE" != "success" ]; then
  log "SKIP: CI=${CI_STATE}"
  exit 0
fi
if [ "$FORCE" != "--force" ]; then
  status "checking existing reviews"
  EXISTING=$(curl -sf -H "Authorization: token ${CODEBERG_TOKEN}" \
    "${API_BASE}/issues/${PR_NUMBER}/comments?limit=50" | \
    jq -r --arg sha "$PR_SHA" \
      '[.[] | select(.body | contains("<!-- reviewed:")) | select(.body | contains($sha))] | length')
  if [ "$EXISTING" -gt "0" ]; then
    log "SKIP: already reviewed at ${PR_SHA:0:7}"
    exit 0
  fi
fi
# --- Fetch diff (truncated to 25KB to bound prompt size/cost) ---
status "fetching diff"
DIFF=$(curl -sf -H "Authorization: token ${CODEBERG_TOKEN}" \
  "${API_BASE}/pulls/${PR_NUMBER}.diff" | head -c 25000)
# "|| true": under pipefail an empty/headerless diff makes grep exit 1,
# which would otherwise abort the script before the review even starts.
DIFF_STAT=$(echo "$DIFF" | grep -E '^\+\+\+ b/' | sed 's|^+++ b/||' | sort || true)
# --- Which context docs? UX doc only matters for frontend changes. ---
NEEDS_UX=false
for f in $DIFF_STAT; do
  case "$f" in
    landing/*|web-app/*) NEEDS_UX=true ;;
  esac
done
# --- Build prompt file ---
status "building prompt"
cat > "${TMPDIR}/prompt.md" << PROMPT_EOF
# PR #${PR_NUMBER}: ${PR_TITLE}
## PR Description
${PR_BODY}
## Changed Files
${DIFF_STAT}
## PRODUCT-TRUTH.md (what we can/cannot claim)
$(cat "${REPO_ROOT}/docs/PRODUCT-TRUTH.md")
## ARCHITECTURE.md
$(cat "${REPO_ROOT}/docs/ARCHITECTURE.md")
PROMPT_EOF
if [ "$NEEDS_UX" = true ] && [ -f "${REPO_ROOT}/docs/UX-DECISIONS.md" ]; then
  cat >> "${TMPDIR}/prompt.md" << UX_EOF
## UX-DECISIONS.md
$(cat "${REPO_ROOT}/docs/UX-DECISIONS.md")
UX_EOF
fi
cat >> "${TMPDIR}/prompt.md" << DIFF_EOF
## Diff
\`\`\`diff
${DIFF}
\`\`\`
## Your Task
Produce a structured review:
### 1. Claim Check
Extract every factual claim about the protocol from user-facing text in the diff.
For each, verify against PRODUCT-TRUTH.md:
- ✅ Accurate
- ⚠️ Partially true (explain)
- ❌ False (cite contradiction)
If no claims, say "No user-facing claims in this diff."
### 2. Code Review
Bugs, logic errors, missing edge cases, broken imports.
### 3. Architecture Check
Does this follow patterns in ARCHITECTURE.md?
### 4. UX/Messaging Check
Does copy follow UX-DECISIONS.md?
(Skip if no UX-DECISIONS context provided.)
### 5. Verdict
**APPROVE**, **REQUEST_CHANGES**, or **DISCUSS** — one line reason.
Be direct. No filler.
DIFF_EOF
PROMPT_SIZE=$(stat -c%s "${TMPDIR}/prompt.md")
log "Prompt: ${PROMPT_SIZE} bytes"
# --- Run claude -p ---
status "running claude (sonnet)"
SECONDS=0
# Capture claude's exit status directly from the command substitution.
# set +e is required: under set -e a failing substitution aborts the script
# before the error handler runs, and grabbing $? after ELAPSED=$SECONDS
# would record the assignment's status (always 0), not claude's.
set +e
REVIEW=$(claude -p \
  --model sonnet \
  --dangerously-skip-permissions \
  --output-format text \
  < "${TMPDIR}/prompt.md" 2>"${TMPDIR}/claude-stderr.log")
CLAUDE_EXIT=$?
set -e
ELAPSED=$SECONDS
if [ $CLAUDE_EXIT -ne 0 ]; then
  log "ERROR: claude exited ${CLAUDE_EXIT} after ${ELAPSED}s"
  log "stderr: $(tail -5 "${TMPDIR}/claude-stderr.log")"
  exit 1
fi
if [ -z "$REVIEW" ]; then
  log "ERROR: empty review after ${ELAPSED}s"
  exit 1
fi
REVIEW_SIZE=$(echo "$REVIEW" | wc -c)
log "Review: ${REVIEW_SIZE} bytes in ${ELAPSED}s"
# --- Post to Codeberg ---
status "posting to Codeberg"
COMMENT_BODY="## 🤖 AI Review
<!-- reviewed: ${PR_SHA} -->
${REVIEW}
---
*Reviewed at \`${PR_SHA:0:7}\` · [PRODUCT-TRUTH.md](../docs/PRODUCT-TRUTH.md) · [ARCHITECTURE.md](../docs/ARCHITECTURE.md)*"
# No -f here: with -f curl exits non-zero on HTTP >= 400 and set -e would
# kill the script before the save-to-disk fallback below ever runs.
# "|| true" also survives network errors (POST_CODE empty -> error branch).
POST_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
  -X POST \
  -H "Authorization: token ${CODEBERG_TOKEN}" \
  -H "Content-Type: application/json" \
  "${API_BASE}/issues/${PR_NUMBER}/comments" \
  -d "$(jq -n --arg body "$COMMENT_BODY" '{body: $body}')" || true)
if [ "${POST_CODE}" = "201" ]; then
  log "POSTED to Codeberg"
else
  log "ERROR: Codeberg HTTP ${POST_CODE}"
  echo "$REVIEW" > "${LOGDIR}/review-pr${PR_NUMBER}-${PR_SHA:0:7}.md"
  log "Review saved to ${LOGDIR}/review-pr${PR_NUMBER}-${PR_SHA:0:7}.md"
  exit 1
fi
# --- Notify OpenClaw (best effort) ---
# "|| true": a review with no bold verdict line makes grep exit 1, which
# would otherwise abort under pipefail right before the final DONE log.
VERDICT=$(echo "$REVIEW" | grep -oP '\*\*(APPROVE|REQUEST_CHANGES|DISCUSS)\*\*' | head -1 | tr -d '*' || true)
if command -v openclaw &>/dev/null; then
  openclaw system event \
    --text "🤖 PR #${PR_NUMBER} reviewed: ${VERDICT:-UNKNOWN}${PR_TITLE}" \
    --mode now 2>/dev/null || true
fi
log "DONE: ${VERDICT:-UNKNOWN} (${ELAPSED}s)"

View file

@ -21,16 +21,14 @@ type Query {
stackMetas(where: stackMetaFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): stackMetaPage! stackMetas(where: stackMetaFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): stackMetaPage!
stats(id: String!): stats stats(id: String!): stats
statss(where: statsFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): statsPage! statss(where: statsFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): statsPage!
ethReserveHistory(id: String!): ethReserveHistory
ethReserveHistorys(where: ethReserveHistoryFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): ethReserveHistoryPage!
feeHistory(id: String!): feeHistory
feeHistorys(where: feeHistoryFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): feeHistoryPage!
positions(id: String!): positions positions(id: String!): positions
positionss(where: positionsFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): positionsPage! positionss(where: positionsFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): positionsPage!
recenters(id: String!): recenters recenters(id: String!): recenters
recenterss(where: recentersFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): recentersPage! recenterss(where: recentersFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): recentersPage!
holders(address: String!): holders holders(address: String!): holders
holderss(where: holdersFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): holdersPage! holderss(where: holdersFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): holdersPage!
transactions(id: String!): transactions
transactionss(where: transactionsFilter, orderBy: String, orderDirection: String, before: String, after: String, limit: Int): transactionsPage!
_meta: Meta _meta: Meta
} }
@ -107,7 +105,6 @@ type stats {
totalMinted: BigInt! totalMinted: BigInt!
totalBurned: BigInt! totalBurned: BigInt!
totalTaxPaid: BigInt! totalTaxPaid: BigInt!
totalUbiClaimed: BigInt!
mintedLastWeek: BigInt! mintedLastWeek: BigInt!
mintedLastDay: BigInt! mintedLastDay: BigInt!
mintNextHourProjected: BigInt! mintNextHourProjected: BigInt!
@ -117,9 +114,10 @@ type stats {
taxPaidLastWeek: BigInt! taxPaidLastWeek: BigInt!
taxPaidLastDay: BigInt! taxPaidLastDay: BigInt!
taxPaidNextHourProjected: BigInt! taxPaidNextHourProjected: BigInt!
ubiClaimedLastWeek: BigInt! ethReserveLastDay: BigInt!
ubiClaimedLastDay: BigInt! ethReserveLastWeek: BigInt!
ubiClaimedNextHourProjected: BigInt! netSupplyChangeDay: BigInt!
netSupplyChangeWeek: BigInt!
ringBufferPointer: Int! ringBufferPointer: Int!
lastHourlyUpdateTimestamp: BigInt! lastHourlyUpdateTimestamp: BigInt!
ringBuffer: JSON! ringBuffer: JSON!
@ -224,14 +222,6 @@ input statsFilter {
totalTaxPaid_lt: BigInt totalTaxPaid_lt: BigInt
totalTaxPaid_gte: BigInt totalTaxPaid_gte: BigInt
totalTaxPaid_lte: BigInt totalTaxPaid_lte: BigInt
totalUbiClaimed: BigInt
totalUbiClaimed_not: BigInt
totalUbiClaimed_in: [BigInt]
totalUbiClaimed_not_in: [BigInt]
totalUbiClaimed_gt: BigInt
totalUbiClaimed_lt: BigInt
totalUbiClaimed_gte: BigInt
totalUbiClaimed_lte: BigInt
mintedLastWeek: BigInt mintedLastWeek: BigInt
mintedLastWeek_not: BigInt mintedLastWeek_not: BigInt
mintedLastWeek_in: [BigInt] mintedLastWeek_in: [BigInt]
@ -304,30 +294,38 @@ input statsFilter {
taxPaidNextHourProjected_lt: BigInt taxPaidNextHourProjected_lt: BigInt
taxPaidNextHourProjected_gte: BigInt taxPaidNextHourProjected_gte: BigInt
taxPaidNextHourProjected_lte: BigInt taxPaidNextHourProjected_lte: BigInt
ubiClaimedLastWeek: BigInt ethReserveLastDay: BigInt
ubiClaimedLastWeek_not: BigInt ethReserveLastDay_not: BigInt
ubiClaimedLastWeek_in: [BigInt] ethReserveLastDay_in: [BigInt]
ubiClaimedLastWeek_not_in: [BigInt] ethReserveLastDay_not_in: [BigInt]
ubiClaimedLastWeek_gt: BigInt ethReserveLastDay_gt: BigInt
ubiClaimedLastWeek_lt: BigInt ethReserveLastDay_lt: BigInt
ubiClaimedLastWeek_gte: BigInt ethReserveLastDay_gte: BigInt
ubiClaimedLastWeek_lte: BigInt ethReserveLastDay_lte: BigInt
ubiClaimedLastDay: BigInt ethReserveLastWeek: BigInt
ubiClaimedLastDay_not: BigInt ethReserveLastWeek_not: BigInt
ubiClaimedLastDay_in: [BigInt] ethReserveLastWeek_in: [BigInt]
ubiClaimedLastDay_not_in: [BigInt] ethReserveLastWeek_not_in: [BigInt]
ubiClaimedLastDay_gt: BigInt ethReserveLastWeek_gt: BigInt
ubiClaimedLastDay_lt: BigInt ethReserveLastWeek_lt: BigInt
ubiClaimedLastDay_gte: BigInt ethReserveLastWeek_gte: BigInt
ubiClaimedLastDay_lte: BigInt ethReserveLastWeek_lte: BigInt
ubiClaimedNextHourProjected: BigInt netSupplyChangeDay: BigInt
ubiClaimedNextHourProjected_not: BigInt netSupplyChangeDay_not: BigInt
ubiClaimedNextHourProjected_in: [BigInt] netSupplyChangeDay_in: [BigInt]
ubiClaimedNextHourProjected_not_in: [BigInt] netSupplyChangeDay_not_in: [BigInt]
ubiClaimedNextHourProjected_gt: BigInt netSupplyChangeDay_gt: BigInt
ubiClaimedNextHourProjected_lt: BigInt netSupplyChangeDay_lt: BigInt
ubiClaimedNextHourProjected_gte: BigInt netSupplyChangeDay_gte: BigInt
ubiClaimedNextHourProjected_lte: BigInt netSupplyChangeDay_lte: BigInt
netSupplyChangeWeek: BigInt
netSupplyChangeWeek_not: BigInt
netSupplyChangeWeek_in: [BigInt]
netSupplyChangeWeek_not_in: [BigInt]
netSupplyChangeWeek_gt: BigInt
netSupplyChangeWeek_lt: BigInt
netSupplyChangeWeek_gte: BigInt
netSupplyChangeWeek_lte: BigInt
ringBufferPointer: Int ringBufferPointer: Int
ringBufferPointer_not: Int ringBufferPointer_not: Int
ringBufferPointer_in: [Int] ringBufferPointer_in: [Int]
@ -474,101 +472,6 @@ input statsFilter {
floorDistanceBps_lte: Int floorDistanceBps_lte: Int
} }
type ethReserveHistory {
id: String!
timestamp: BigInt!
ethBalance: BigInt!
}
type ethReserveHistoryPage {
items: [ethReserveHistory!]!
pageInfo: PageInfo!
totalCount: Int!
}
input ethReserveHistoryFilter {
AND: [ethReserveHistoryFilter]
OR: [ethReserveHistoryFilter]
id: String
id_not: String
id_in: [String]
id_not_in: [String]
id_contains: String
id_not_contains: String
id_starts_with: String
id_ends_with: String
id_not_starts_with: String
id_not_ends_with: String
timestamp: BigInt
timestamp_not: BigInt
timestamp_in: [BigInt]
timestamp_not_in: [BigInt]
timestamp_gt: BigInt
timestamp_lt: BigInt
timestamp_gte: BigInt
timestamp_lte: BigInt
ethBalance: BigInt
ethBalance_not: BigInt
ethBalance_in: [BigInt]
ethBalance_not_in: [BigInt]
ethBalance_gt: BigInt
ethBalance_lt: BigInt
ethBalance_gte: BigInt
ethBalance_lte: BigInt
}
type feeHistory {
id: String!
timestamp: BigInt!
ethFees: BigInt!
krkFees: BigInt!
}
type feeHistoryPage {
items: [feeHistory!]!
pageInfo: PageInfo!
totalCount: Int!
}
input feeHistoryFilter {
AND: [feeHistoryFilter]
OR: [feeHistoryFilter]
id: String
id_not: String
id_in: [String]
id_not_in: [String]
id_contains: String
id_not_contains: String
id_starts_with: String
id_ends_with: String
id_not_starts_with: String
id_not_ends_with: String
timestamp: BigInt
timestamp_not: BigInt
timestamp_in: [BigInt]
timestamp_not_in: [BigInt]
timestamp_gt: BigInt
timestamp_lt: BigInt
timestamp_gte: BigInt
timestamp_lte: BigInt
ethFees: BigInt
ethFees_not: BigInt
ethFees_in: [BigInt]
ethFees_not_in: [BigInt]
ethFees_gt: BigInt
ethFees_lt: BigInt
ethFees_gte: BigInt
ethFees_lte: BigInt
krkFees: BigInt
krkFees_not: BigInt
krkFees_in: [BigInt]
krkFees_not_in: [BigInt]
krkFees_gt: BigInt
krkFees_lt: BigInt
krkFees_gte: BigInt
krkFees_lte: BigInt
}
type positions { type positions {
id: String! id: String!
owner: String! owner: String!
@ -820,6 +723,8 @@ input recentersFilter {
type holders { type holders {
address: String! address: String!
balance: BigInt! balance: BigInt!
totalEthSpent: BigInt!
totalTokensAcquired: BigInt!
} }
type holdersPage { type holdersPage {
@ -849,4 +754,114 @@ input holdersFilter {
balance_lt: BigInt balance_lt: BigInt
balance_gte: BigInt balance_gte: BigInt
balance_lte: BigInt balance_lte: BigInt
totalEthSpent: BigInt
totalEthSpent_not: BigInt
totalEthSpent_in: [BigInt]
totalEthSpent_not_in: [BigInt]
totalEthSpent_gt: BigInt
totalEthSpent_lt: BigInt
totalEthSpent_gte: BigInt
totalEthSpent_lte: BigInt
totalTokensAcquired: BigInt
totalTokensAcquired_not: BigInt
totalTokensAcquired_in: [BigInt]
totalTokensAcquired_not_in: [BigInt]
totalTokensAcquired_gt: BigInt
totalTokensAcquired_lt: BigInt
totalTokensAcquired_gte: BigInt
totalTokensAcquired_lte: BigInt
}
type transactions {
id: String!
holder: String!
type: String!
tokenAmount: BigInt!
ethAmount: BigInt!
timestamp: BigInt!
blockNumber: Int!
txHash: String!
}
type transactionsPage {
items: [transactions!]!
pageInfo: PageInfo!
totalCount: Int!
}
input transactionsFilter {
AND: [transactionsFilter]
OR: [transactionsFilter]
id: String
id_not: String
id_in: [String]
id_not_in: [String]
id_contains: String
id_not_contains: String
id_starts_with: String
id_ends_with: String
id_not_starts_with: String
id_not_ends_with: String
holder: String
holder_not: String
holder_in: [String]
holder_not_in: [String]
holder_contains: String
holder_not_contains: String
holder_starts_with: String
holder_ends_with: String
holder_not_starts_with: String
holder_not_ends_with: String
type: String
type_not: String
type_in: [String]
type_not_in: [String]
type_contains: String
type_not_contains: String
type_starts_with: String
type_ends_with: String
type_not_starts_with: String
type_not_ends_with: String
tokenAmount: BigInt
tokenAmount_not: BigInt
tokenAmount_in: [BigInt]
tokenAmount_not_in: [BigInt]
tokenAmount_gt: BigInt
tokenAmount_lt: BigInt
tokenAmount_gte: BigInt
tokenAmount_lte: BigInt
ethAmount: BigInt
ethAmount_not: BigInt
ethAmount_in: [BigInt]
ethAmount_not_in: [BigInt]
ethAmount_gt: BigInt
ethAmount_lt: BigInt
ethAmount_gte: BigInt
ethAmount_lte: BigInt
timestamp: BigInt
timestamp_not: BigInt
timestamp_in: [BigInt]
timestamp_not_in: [BigInt]
timestamp_gt: BigInt
timestamp_lt: BigInt
timestamp_gte: BigInt
timestamp_lte: BigInt
blockNumber: Int
blockNumber_not: Int
blockNumber_in: [Int]
blockNumber_not_in: [Int]
blockNumber_gt: Int
blockNumber_lt: Int
blockNumber_gte: Int
blockNumber_lte: Int
txHash: String
txHash_not: String
txHash_in: [String]
txHash_not_in: [String]
txHash_contains: String
txHash_not_contains: String
txHash_starts_with: String
txHash_ends_with: String
txHash_not_starts_with: String
txHash_not_ends_with: String
} }

View file

@ -216,6 +216,10 @@ export async function updateHourlyData(context: StatsContext, timestamp: bigint)
const lastUpdate = statsData.lastHourlyUpdateTimestamp ?? 0n; const lastUpdate = statsData.lastHourlyUpdateTimestamp ?? 0n;
// Snapshot current holderCount into ring buffer slot 3 // Snapshot current holderCount into ring buffer slot 3
// NOTE: Slot 3 migrated from cumulative tax to holderCount in PR #177.
// Existing ring buffer data will contain stale tax values interpreted as
// holder counts for up to 7 days (168 hours) post-deploy until the buffer
// fully rotates. Data self-heals as new hourly snapshots overwrite old slots.
const currentHolderCount = BigInt(statsData.holderCount ?? 0); const currentHolderCount = BigInt(statsData.holderCount ?? 0);
const base = pointer * RING_BUFFER_SEGMENTS; const base = pointer * RING_BUFFER_SEGMENTS;
ringBuffer[base + 3] = currentHolderCount; ringBuffer[base + 3] = currentHolderCount;