diff --git a/services/ponder/src/api/index.ts b/services/ponder/src/api/index.ts
index 362c323..bb0c19e 100644
--- a/services/ponder/src/api/index.ts
+++ b/services/ponder/src/api/index.ts
@@ -1,4 +1,5 @@
 import { Hono } from 'hono';
+import type { Context, Next } from 'hono';
 import { cors } from 'hono/cors';
 import { client, graphql } from 'ponder';
 import { db } from 'ponder:api';
@@ -19,11 +20,96 @@ app.use(
   })
 );
 
+// Server-side GraphQL response cache.
+//
+// Without this, every polling client (O(users × 1/poll_interval)) generates a
+// separate DB query. With this 5 s cache + in-flight coalescing, the effective
+// DB hit rate is capped at O(1/5s) regardless of how many clients poll.
+//
+// The TTL matches the Caddy `Cache-Control: public, max-age=5` header so that
+// if a caching CDN or proxy is added later the two layers stay in sync.
+
+const GRAPHQL_CACHE_TTL_MS = 5_000; // 5 seconds – matches Caddy max-age=5
+
+const responseCache = new Map<string, { body: string; expiresAt: number }>();
+
+// In-flight map: when concurrent requests arrive for the same query before the
+// first response is ready, they all await the same DB hit instead of each
+// issuing their own.
+const inFlight = new Map<string, Promise<string | null>>();
+
+// Evict expired entries periodically so the cache stays bounded in memory.
+const evictInterval = setInterval(() => {
+  const now = Date.now();
+  for (const [k, v] of responseCache) {
+    if (v.expiresAt <= now) responseCache.delete(k);
+  }
+}, 30_000);
+// Don't keep the process alive just for eviction.
+(evictInterval as unknown as { unref?: () => void }).unref?.();
+
+async function graphqlCache(c: Context, next: Next): Promise<Response | void> {
+  if (c.req.method !== 'GET' && c.req.method !== 'POST') return next();
+
+  // Build a stable cache key without consuming the original request body so
+  // the downstream graphql() handler can still read it.
+  const cacheKey =
+    c.req.method === 'POST'
+      ? await c.req.raw.clone().text()
+      : new URL(c.req.url).search;
+
+  const now = Date.now();
+
+  // 1. Cache hit – serve immediately, no DB involved.
+  const hit = responseCache.get(cacheKey);
+  if (hit && hit.expiresAt > now) {
+    return c.body(hit.body, 200, { 'Content-Type': 'application/json' });
+  }
+
+  // 2. In-flight coalescing – N concurrent identical queries share one DB hit.
+  const flying = inFlight.get(cacheKey);
+  if (flying) {
+    const body = await flying;
+    if (body !== null) {
+      return c.body(body, 200, { 'Content-Type': 'application/json' });
+    }
+    // The in-flight request errored; fall through and try again fresh.
+  }
+
+  // 3. Cache miss – run the real graphql() handler and cache a successful result.
+  const promise = (async (): Promise<string | null> => {
+    await next();
+    if (c.res.status !== 200) return null;
+    const body = await c.res.clone().text();
+    try {
+      const parsed = JSON.parse(body) as { errors?: unknown };
+      if (!parsed.errors) {
+        responseCache.set(cacheKey, { body, expiresAt: now + GRAPHQL_CACHE_TTL_MS });
+        return body;
+      }
+    } catch {
+      // Non-JSON response; don't cache.
+    }
+    return null;
+  })();
+
+  inFlight.set(cacheKey, promise);
+  promise.finally(() => inFlight.delete(cacheKey)).catch(() => {}); // cleanup only; the underlying promise is awaited below
+
+  const body = await promise;
+  if (body !== null) {
+    return c.body(body, 200, { 'Content-Type': 'application/json' });
+  }
+  // Error path: graphql() already populated c.res; let Hono send it as-is.
+}
+
 // SQL endpoint
 app.use('/sql/*', client({ db, schema }));
 
-// GraphQL endpoints
+// GraphQL endpoints with server-side caching + in-flight coalescing
+app.use('/graphql', graphqlCache);
 app.use('/graphql', graphql({ db, schema }));
+app.use('/', graphqlCache);
 app.use('/', graphql({ db, schema }));
 
 export default app;
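Note on the coalescing guarantee: below is a minimal standalone sketch of the
pattern graphqlCache relies on, using a 50 ms stand-in for the real graphql()
handler. All names in it (pending, slowQuery, coalesced, dbHits) are
illustrative only and appear nowhere in the diff above.

const pending = new Map<string, Promise<string>>();
let dbHits = 0;

// Stand-in for the downstream handler; each call represents one DB round-trip.
async function slowQuery(key: string): Promise<string> {
  dbHits += 1;
  await new Promise((resolve) => setTimeout(resolve, 50));
  return JSON.stringify({ data: { echoed: key } });
}

// Concurrent callers with the same key share a single slowQuery() invocation.
function coalesced(key: string): Promise<string> {
  const existing = pending.get(key);
  if (existing) return existing;
  const p = slowQuery(key).finally(() => pending.delete(key));
  pending.set(key, p);
  return p;
}

async function demo(): Promise<void> {
  // 100 concurrent identical queries resolve through exactly one DB hit.
  await Promise.all(
    Array.from({ length: 100 }, () => coalesced('{ accounts { id } }'))
  );
  console.log(dbHits); // 1
}

demo();

Combined with the 5 s TTL, steady-state DB load is bounded at roughly one
query per unique GraphQL document every five seconds, independent of how many
clients poll.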