#!/usr/bin/env bash
# =============================================================================
# batch-eval.sh — revm-based batch fitness evaluator
#
# Replaces the per-candidate Anvil+forge-script pipeline with in-process EVM
# execution via Foundry's native revm backend (FitnessEvaluator.t.sol).
#
# Speedup: compiles each candidate once (unavoidable — different Solidity per
# candidate), then runs ALL attack sequences in a single in-process forge test
# with O(1) memory snapshot/revert instead of RPC calls per attack.
#
# Usage:
#   ./tools/push3-evolution/revm-evaluator/batch-eval.sh \
#       [--output-dir /tmp/scores] \
#       candidate0.push3 candidate1.push3 ...
#
# Output (stdout):
#   One JSON object per candidate:
#     {"candidate_id":"gen0_c000","fitness":123456789}
#
# Exit codes:
#   0  Success.
#   1  Candidate-level error (transpile/compile failed for at least one candidate).
#   2  Infrastructure error (missing tool, BASE_RPC_URL not set, forge test failed).
#
# Environment:
#   BASE_RPC_URL   Required. Base network RPC endpoint for forking.
#   ATTACKS_DIR    Optional. Path to *.jsonl attack files.
#                  (default: /onchain/script/backtesting/attacks)
#   OUTPUT_DIR     Optional. Directory to copy scores.jsonl into (--output-dir overrides).
# =============================================================================

set -euo pipefail

# Make Foundry binaries (forge) visible regardless of the caller's PATH.
export PATH="${HOME}/.foundry/bin:${PATH}"

# Repo-layout constants, resolved once from this script's own location and
# never reassigned — mark them readonly so a later accidental write fails loudly.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
ONCHAIN_DIR="$REPO_ROOT/onchain"
TRANSPILER_DIR="$REPO_ROOT/tools/push3-transpiler"
TRANSPILER_OUT="$ONCHAIN_DIR/src/OptimizerV3Push3.sol"
readonly SCRIPT_DIR REPO_ROOT ONCHAIN_DIR TRANSPILER_DIR TRANSPILER_OUT
# Use OptimizerV3 (inherits Optimizer → UUPS compatible, has getLiquidityParams)
# instead of standalone OptimizerV3Push3 which lacks UUPS hooks.
OPTIMIZERV3_SOL="$ONCHAIN_DIR/src/OptimizerV3.sol"
ARTIFACT_PATH="$ONCHAIN_DIR/out/OptimizerV3.sol/OptimizerV3.json"
DEFAULT_ATTACKS_DIR="$ONCHAIN_DIR/script/backtesting/attacks"

# =============================================================================
# Argument parsing
# =============================================================================
OUTPUT_DIR="${OUTPUT_DIR:-}"
declare -a PUSH3_FILES=()

while [[ $# -gt 0 ]]; do
  case $1 in
    --output-dir) OUTPUT_DIR="$2"; shift 2 ;;
    --*) echo "Unknown option: $1" >&2; exit 2 ;;
    *) PUSH3_FILES+=("$1"); shift ;;
  esac
done

if [ "${#PUSH3_FILES[@]}" -eq 0 ]; then
  echo "Usage: $0 [--output-dir DIR] candidate1.push3 ..." >&2
  exit 2
fi

# =============================================================================
# Environment checks
# =============================================================================
BASE_RPC_URL="${BASE_RPC_URL:-}"
if [ -z "$BASE_RPC_URL" ]; then
  echo " [batch-eval] ERROR: BASE_RPC_URL env var required for Base network fork" >&2
  exit 2
fi

# npm and npx are invoked directly below, so verify them up front alongside node.
for _tool in forge node npm npx python3; do
  command -v "$_tool" &>/dev/null || { echo " [batch-eval] ERROR: $_tool not found in PATH" >&2; exit 2; }
done

# =============================================================================
# Helpers
# =============================================================================
log()   { echo " [batch-eval] $*" >&2; }
fail2() { echo " [batch-eval] ERROR: $*" >&2; exit 2; }

# =============================================================================
# Step 1 — Ensure transpiler dependencies are installed
# =============================================================================
if [ ! -d "$TRANSPILER_DIR/node_modules" ]; then
  log "Installing transpiler dependencies…"
  (cd "$TRANSPILER_DIR" && npm install --silent) || fail2 "npm install in push3-transpiler failed"
fi

# =============================================================================
# Step 2 — Transpile + compile each candidate, extract bytecodes into manifest
# =============================================================================
MANIFEST_DIR="$(mktemp -d)"
# Remove the manifest scratch dir on every exit path — it was previously leaked.
cleanup() { rm -rf -- "$MANIFEST_DIR"; }
trap cleanup EXIT

IDS_FILE="$MANIFEST_DIR/ids.txt"
BYTECODES_FILE="$MANIFEST_DIR/bytecodes.txt"
: > "$IDS_FILE"
: > "$BYTECODES_FILE"

COMPILED_COUNT=0
FAILED_IDS=""

for PUSH3_FILE in "${PUSH3_FILES[@]}"; do
  # Absolutize the candidate path. Guarded so one bad path skips that candidate
  # instead of aborting the whole batch via set -e.
  if ! PUSH3_ABS_DIR="$(cd "$(dirname "$PUSH3_FILE")" && pwd)"; then
    log "WARNING: cannot resolve path for $PUSH3_FILE — skipping"
    FAILED_IDS="$FAILED_IDS $(basename "$PUSH3_FILE" .push3)"
    continue
  fi
  PUSH3_FILE="$PUSH3_ABS_DIR/$(basename "$PUSH3_FILE")"
  CANDIDATE_ID="$(basename "$PUSH3_FILE" .push3)"

  # Transpile Push3 → OptimizerV3Push3.sol
  TRANSPILE_EC=0
  (
    cd "$TRANSPILER_DIR"
    npx ts-node src/index.ts "$PUSH3_FILE" "$TRANSPILER_OUT"
  ) >/dev/null 2>&1 || TRANSPILE_EC=$?
  if [ "$TRANSPILE_EC" -ne 0 ]; then
    log "WARNING: transpile failed for $CANDIDATE_ID (exit $TRANSPILE_EC) — skipping"
    FAILED_IDS="$FAILED_IDS $CANDIDATE_ID"
    continue
  fi

  # Inject transpiled calculateParams body into OptimizerV3.sol (UUPS-compatible).
  # Extract function body from OptimizerV3Push3.sol and replace content between
  # BEGIN/END markers in OptimizerV3.sol.
  python3 - "$TRANSPILER_OUT" "$OPTIMIZERV3_SOL" <<'PYEOF' || {
import sys

push3_path = sys.argv[1]
v3_path = sys.argv[2]

# Extract the calculateParams body from OptimizerV3Push3.sol: everything
# between the function's opening { and its 4-space-indented closing }.
with open(push3_path) as f:
    push3 = f.read()

fn_start = push3.find("function calculateParams")
if fn_start == -1:
    sys.exit("calculateParams not found in OptimizerV3Push3")
brace_start = push3.find("{", fn_start)
body_start = push3.index("\n", brace_start) + 1

# Body ends at the first closing brace at function indent level (4 spaces or
# column 0). Deeper-nested closing braces do not match and stay in the body.
lines = push3[body_start:].split("\n")
body_lines = []
for line in lines:
    if line.strip() == "}" and (line.startswith("    }") or line == "}"):
        break  # function-closing brace
    body_lines.append(line)
body = "\n".join(body_lines)

# Inject into OptimizerV3.sol between the BEGIN/END markers.
with open(v3_path) as f:
    v3 = f.read()
begin_marker = "// ── BEGIN TRANSPILER OUTPUT"
end_marker = "// ── END TRANSPILER OUTPUT"
begin_idx = v3.find(begin_marker)
end_idx = v3.find(end_marker)
if begin_idx == -1 or end_idx == -1:
    sys.exit("markers not found in OptimizerV3.sol")
begin_line_end = v3.index("\n", begin_idx) + 1
# Resume writing at the START of the end-marker line, not at the marker text:
# resuming at end_idx stripped the marker's leading indentation on every
# injection. Keeps the end marker line (and its indent) intact.
end_line_start = v3.rfind("\n", 0, end_idx) + 1
with open(v3_path, "w") as f:
    f.write(v3[:begin_line_end])
    f.write(body + "\n")
    f.write(v3[end_line_start:])
PYEOF
    log "WARNING: failed to inject calculateParams into OptimizerV3.sol for $CANDIDATE_ID — skipping"
    FAILED_IDS="$FAILED_IDS $CANDIDATE_ID"
    continue
  }

  # Compile (forge's incremental build skips unchanged files quickly)
  FORGE_EC=0
  (cd "$ONCHAIN_DIR" && forge build --silent) >/dev/null 2>&1 || FORGE_EC=$?
  if [ "$FORGE_EC" -ne 0 ]; then
    log "WARNING: forge build failed for $CANDIDATE_ID (exit $FORGE_EC) — skipping"
    FAILED_IDS="$FAILED_IDS $CANDIDATE_ID"
    continue
  fi

  # Extract deployed bytecode from the artifact, normalized to carry a 0x
  # prefix (the code adds the prefix when missing — it never strips it).
  BYTECODE_HEX="$(python3 - "$ARTIFACT_PATH" <<'PYEOF'
import json, sys

with open(sys.argv[1]) as f:
    d = json.load(f)
bytecode = d["deployedBytecode"]["object"]
# Ensure 0x prefix
if not bytecode.startswith("0x"):
    bytecode = "0x" + bytecode
print(bytecode)
PYEOF
)" || { log "WARNING: failed to extract bytecode for $CANDIDATE_ID — skipping"; FAILED_IDS="$FAILED_IDS $CANDIDATE_ID"; continue; }

  if [ -z "$BYTECODE_HEX" ] || [ "$BYTECODE_HEX" = "0x" ]; then
    log "WARNING: empty bytecode for $CANDIDATE_ID — skipping"
    FAILED_IDS="$FAILED_IDS $CANDIDATE_ID"
    continue
  fi

  printf '%s\n' "$CANDIDATE_ID" >> "$IDS_FILE"
  printf '%s\n' "$BYTECODE_HEX" >> "$BYTECODES_FILE"
  COMPILED_COUNT=$((COMPILED_COUNT + 1))
  log "Compiled $CANDIDATE_ID"
done

if [ "$COMPILED_COUNT" -eq 0 ]; then
  fail2 "No candidates compiled successfully — aborting"
fi
log "Compiled $COMPILED_COUNT / ${#PUSH3_FILES[@]} candidates"

# =============================================================================
# Step 3 — Run FitnessEvaluator.t.sol (in-process revm, all candidates at once)
# =============================================================================
ATTACKS_DIR="${ATTACKS_DIR:-$DEFAULT_ATTACKS_DIR}"

log "Running FitnessEvaluator.t.sol (in-process revm, fork: $BASE_RPC_URL)…"
FORGE_TEST_EC=0
FORGE_OUTPUT="$(
  cd "$ONCHAIN_DIR"
  BASE_RPC_URL="$BASE_RPC_URL" \
  FITNESS_MANIFEST_DIR="$MANIFEST_DIR" \
  ATTACKS_DIR="$ATTACKS_DIR" \
  forge test \
    --match-contract FitnessEvaluator \
    --match-test testBatchEvaluate \
    -vv \
    2>&1
)" || FORGE_TEST_EC=$?
# =============================================================================
# Step 4 — Extract and emit score JSON lines
#
# forge test -vv wraps console.log output with leading spaces and a "Logs:"
# header; pick out the lines carrying the score JSON and strip the indentation.
# =============================================================================
if [[ "$FORGE_TEST_EC" -ne 0 ]]; then
  # Surface the captured forge output so the failure can be diagnosed.
  printf '%s\n' "$FORGE_OUTPUT" >&2
  fail2 "forge test failed (exit $FORGE_TEST_EC)"
fi

SCORE_LINES="$(printf '%s\n' "$FORGE_OUTPUT" | grep -E '"candidate_id"' | sed 's/^[[:space:]]*//' || true)"
if [[ -z "$SCORE_LINES" ]]; then
  printf '%s\n' "$FORGE_OUTPUT" >&2
  fail2 "No score lines found in forge test output"
fi

# Scores are the script's only stdout; everything else goes to stderr via log().
printf '%s\n' "$SCORE_LINES"

# Optionally persist a copy for downstream consumers.
if [[ -n "$OUTPUT_DIR" ]]; then
  mkdir -p "$OUTPUT_DIR"
  printf '%s\n' "$SCORE_LINES" > "$OUTPUT_DIR/scores.jsonl"
  log "Scores written to $OUTPUT_DIR/scores.jsonl"
fi

# Candidate-level failures earlier in the run map to the documented exit code 1.
if [[ -n "$FAILED_IDS" ]]; then
  log "WARNING: the following candidates were skipped (compile failed): $FAILED_IDS"
  exit 1
fi

log "Done — scored $COMPILED_COUNT candidates"