diff --git a/keeperhub-events/event-tracker/package.json b/keeperhub-events/event-tracker/package.json index 89b7a7f43..c5ed39edc 100644 --- a/keeperhub-events/event-tracker/package.json +++ b/keeperhub-events/event-tracker/package.json @@ -20,11 +20,13 @@ "@aws-sdk/client-sqs": "^3.1005.0", "ethers": "^6.13.4", "ioredis": "^5.4.2", - "uuid": "^14.0.0" + "uuid": "^14.0.0", + "ws": "^8.17.1" }, "devDependencies": { "@types/node": "^24.12.2", "@types/uuid": "^10.0.0", + "@types/ws": "^8.5.13", "tsup": "^8.3.5", "tsx": "^4.0.0", "vite": "^8.0.9", diff --git a/keeperhub-events/event-tracker/src/chains/provider-manager.ts b/keeperhub-events/event-tracker/src/chains/provider-manager.ts index daa556b30..0fa795033 100644 --- a/keeperhub-events/event-tracker/src/chains/provider-manager.ts +++ b/keeperhub-events/event-tracker/src/chains/provider-manager.ts @@ -1,4 +1,5 @@ import { ethers } from "ethers"; +import { WebSocket } from "ws"; import { logger } from "../../lib/utils/logger"; /** @@ -39,6 +40,15 @@ const HEARTBEAT_TIMEOUT_MS = 10_000; const INITIAL_RECONNECT_DELAY_MS = 1_000; const MAX_RECONNECT_DELAY_MS = 60_000; const MAX_RECONNECT_ATTEMPTS = 10; +/** + * Cap on `eth_subscribe(["newHeads"])` round-trip during the probe in + * `probeSubscriptionSupport`. An upstream that accepts the WS handshake + * but never answers the JSON-RPC frame (silent backend, broken proxy) would + * otherwise block `createProvider` forever. 10 s matches the heartbeat + * timeout in `startHeartbeat` so the two reachability gates fail at the + * same scale. + */ +const PROBE_TIMEOUT_MS = 10_000; export type LogHandler = (log: ethers.Log) => void | Promise; export type Unsubscribe = () => void; @@ -60,7 +70,19 @@ export type DisconnectHandler = (ev: DisconnectEvent) => void | Promise; export interface ChainHealth { chainId: number; + /** + * The URL the live provider was opened against, or the configured + * primary if no provider is currently connected. 
Equals the configured + * fallback when the most recent successful (re)connect landed on it; + * resets to the configured primary during a mid-reconnect window + * because `reconnect()` clears `activeWssUrl` before re-attempting. + */ wssUrl: string; + /** + * Configured fallback URL, or null if none. Surfaced so operators can + * see whether failover capacity exists for this chain. + */ + fallbackWssUrl: string | null; connected: boolean; reconnecting: boolean; lastBlockAt: number | null; @@ -78,6 +100,11 @@ export interface ChainHealth { export interface SubscribeOptions { chainId: number; wssUrl: string; + /** + * Optional secondary URL tried when the primary fails at provider + * creation or reconnect. See `ChainEntry.fallbackWssUrl`. + */ + fallbackWssUrl?: string; address: string; topic0: string; handler: LogHandler; @@ -96,7 +123,24 @@ interface Subscriber { interface ChainEntry { chainId: number; + /** + * Configured primary URL; immutable once the entry is created. Each + * (re)connect attempt tries this first. + */ wssUrl: string; + /** + * Configured fallback URL, immutable once the entry is created. Tried + * only when the primary attempt fails (factory throws, `provider.ready` + * rejects, or `eth_subscribe` probe rejects). Reconnects always start + * over from primary so a primary that recovers is preferred. + */ + fallbackWssUrl: string | null; + /** + * Which URL the live provider was created from. Equal to `wssUrl` on + * the common path, equal to `fallbackWssUrl` when the primary failed + * at the last (re)connect, null when no provider is live. + */ + activeWssUrl: string | null; provider: ethers.WebSocketProvider | null; readyPromise: Promise | null; /** @@ -121,8 +165,30 @@ interface ChainEntry { disconnectHandlers: Set; } +/** + * Wrap socket construction so we can attach an EventEmitter-style `error` + * listener synchronously, before ethers' WebSocketProvider has had a + * chance to assign its own `onerror`. 
Without this, an early ws-layer + * error (DNS NXDOMAIN, ECONNREFUSED, non-WS server returning HTTP 200) + * fires on a listenerless EventEmitter, gets re-thrown synchronously, + * escapes openProvider's try/catch as `uncaughtException`, and `index.ts` + * exits the pod - which would crashloop the whole event-tracker on a + * misconfigured WSS URL even when a healthy fallback is configured. + * + * The listener is a no-op: failures still reject `provider.ready` via + * ethers' onerror (assigned shortly after we return), and that rejection + * is what openProvider catches to walk to the fallback. We just need + * *some* error listener to be on the ws by the time the connection + * attempt resolves. + */ const defaultFactory: ProviderFactory = (wssUrl) => - new ethers.WebSocketProvider(wssUrl); + new ethers.WebSocketProvider(() => { + const socket = new WebSocket(wssUrl); + socket.on("error", () => { + // intentionally empty - see comment on defaultFactory + }); + return socket; + }); const defaultOnPermanentFailure = (chainId: number): void => { logger.error( @@ -160,8 +226,9 @@ export class ChainProviderManager { async getOrCreateProvider( chainId: number, wssUrl: string, + fallbackWssUrl?: string, ): Promise { - const entry = this.ensureEntry(chainId, wssUrl); + const entry = this.ensureEntry(chainId, wssUrl, fallbackWssUrl); // If a reconnect loop is live, wait for it to settle before checking // the provider. 
Without this, a new subscriber arriving while the @@ -200,8 +267,16 @@ export class ChainProviderManager { } async subscribeToLogs(opts: SubscribeOptions): Promise { - const entry = this.ensureEntry(opts.chainId, opts.wssUrl); - await this.getOrCreateProvider(opts.chainId, opts.wssUrl); + const entry = this.ensureEntry( + opts.chainId, + opts.wssUrl, + opts.fallbackWssUrl, + ); + await this.getOrCreateProvider( + opts.chainId, + opts.wssUrl, + opts.fallbackWssUrl, + ); const subscriber: Subscriber = { address: opts.address.toLowerCase(), @@ -290,9 +365,25 @@ export class ChainProviderManager { if (!entry) { return null; } + return this.toHealth(entry); + } + + getAllHealth(): ChainHealth[] { + const out: ChainHealth[] = []; + for (const entry of this.chains.values()) { + out.push(this.toHealth(entry)); + } + return out; + } + + private toHealth(entry: ChainEntry): ChainHealth { return { chainId: entry.chainId, - wssUrl: entry.wssUrl, + // Active URL when a provider is live, primary otherwise. Lets + // operators see whether failover kicked in without exposing a + // stale "active" value when nothing is connected. + wssUrl: entry.activeWssUrl ?? entry.wssUrl, + fallbackWssUrl: entry.fallbackWssUrl, connected: entry.provider != null && !entry.isReconnecting, reconnecting: entry.isReconnecting, lastBlockAt: entry.lastBlockAt, @@ -301,22 +392,6 @@ export class ChainProviderManager { }; } - getAllHealth(): ChainHealth[] { - const out: ChainHealth[] = []; - for (const entry of this.chains.values()) { - out.push({ - chainId: entry.chainId, - wssUrl: entry.wssUrl, - connected: entry.provider != null && !entry.isReconnecting, - reconnecting: entry.isReconnecting, - lastBlockAt: entry.lastBlockAt, - subscriberCount: entry.subscribers.size, - lastCreateError: entry.lastCreateError, - }); - } - return out; - } - async destroy(): Promise { this.isDestroyed = true; // Wake every reconnect loop that is currently sleeping. 
The loop @@ -346,6 +421,7 @@ export class ChainProviderManager { entry.subscribers.clear(); entry.disconnectHandlers.clear(); entry.provider = null; + entry.activeWssUrl = null; entry.readyPromise = null; } this.chains.clear(); @@ -358,12 +434,20 @@ export class ChainProviderManager { } } - private ensureEntry(chainId: number, wssUrl: string): ChainEntry { + private ensureEntry( + chainId: number, + wssUrl: string, + fallbackWssUrl?: string, + ): ChainEntry { + const fallback = fallbackWssUrl ?? null; const existing = this.chains.get(chainId); if (existing) { - if (existing.wssUrl !== wssUrl) { + // Identity is the (primary, fallback) tuple. Two callers must agree + // on both; otherwise the second caller would silently inherit the + // first caller's failover behaviour. + if (existing.wssUrl !== wssUrl || existing.fallbackWssUrl !== fallback) { throw new Error( - `chainId ${chainId} already registered with wssUrl ${existing.wssUrl}; refusing to reuse for ${wssUrl}`, + `chainId ${chainId} already registered with wssUrl=${existing.wssUrl} fallbackWssUrl=${existing.fallbackWssUrl}; refusing to reuse for wssUrl=${wssUrl} fallbackWssUrl=${fallback}`, ); } return existing; @@ -371,6 +455,8 @@ export class ChainProviderManager { const entry: ChainEntry = { chainId, wssUrl, + fallbackWssUrl: fallback, + activeWssUrl: null, provider: null, readyPromise: null, reconnectPromise: null, @@ -387,13 +473,66 @@ export class ChainProviderManager { return entry; } + /** + * Ordered list of URLs to try at (re)connect time: primary first, + * fallback (if configured) second. Returned fresh on every call so a + * caller can iterate without mutating entry state. + */ + private candidateUrls(entry: ChainEntry): string[] { + return entry.fallbackWssUrl + ? [entry.wssUrl, entry.fallbackWssUrl] + : [entry.wssUrl]; + } + + /** + * Walk the candidate URL list in order, returning the first + * `(provider, urlUsed)` pair that satisfies factory + ready + probe. 
+ * On failure of one URL the partially-constructed provider is + * destroyed best-effort before moving on, so we do not leak sockets + * across attempts. If every URL fails, throws an aggregate error + * containing each URL's failure message. + */ + private async openProvider( + entry: ChainEntry, + ): Promise<{ provider: ethers.WebSocketProvider; urlUsed: string }> { + const urls = this.candidateUrls(entry); + const failures: string[] = []; + for (const url of urls) { + let provider: ethers.WebSocketProvider | null = null; + try { + provider = this.factory(url); + await provider.ready; + await this.probeSubscriptionSupport(provider, entry, url); + return { provider, urlUsed: url }; + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + failures.push(`${url}: ${message}`); + if (provider) { + try { + await provider.destroy(); + } catch { + // Best-effort: socket may already be gone (probe failure + // already destroys), and we are about to throw or move on. + } + } + } + } + throw new Error( + `chain ${entry.chainId}: all ${urls.length} WSS URL(s) failed:\n ${failures.join("\n ")}`, + ); + } + private async createProvider( entry: ChainEntry, ): Promise { - const provider = this.factory(entry.wssUrl); - await provider.ready; - await this.probeSubscriptionSupport(provider, entry); + const { provider, urlUsed } = await this.openProvider(entry); entry.provider = provider; + entry.activeWssUrl = urlUsed; + if (urlUsed !== entry.wssUrl) { + logger.warn( + `[ChainProviderManager] chain=${entry.chainId} primary failed; running on fallback ${urlUsed}`, + ); + } // Clear the prior failure marker now that we have a working provider. // Without this, a chain that recovered after a probe failure would // still report `lastCreateError` indefinitely. 
@@ -427,21 +566,44 @@ export class ChainProviderManager { private async probeSubscriptionSupport( provider: ethers.WebSocketProvider, entry: ChainEntry, + urlUsed: string, ): Promise { let filterId: unknown; try { - filterId = await provider.send("eth_subscribe", ["newHeads"]); - } catch (err) { + // Race the RPC call against an explicit timeout. ethers does not + // give us an externally controllable timeout on `provider.send`, + // and an upstream that accepts the WS handshake but never answers + // the JSON-RPC frame would otherwise hang createProvider for the + // life of the socket. The Node 20 native timer doesn't need clearing + // because the race winner discards the loser's result, but we still + // clear it explicitly so the timeout doesn't keep the event loop + // alive after a fast probe. + let timeoutHandle: NodeJS.Timeout | null = null; + const timeoutPromise = new Promise((_, reject) => { + timeoutHandle = setTimeout( + () => + reject( + new Error( + `eth_subscribe probe timed out after ${PROBE_TIMEOUT_MS}ms`, + ), + ), + PROBE_TIMEOUT_MS, + ); + }); try { - await provider.destroy(); - } catch { - // Best-effort: the failed provider is already unusable; if - // destroy throws (e.g. socket already closed) there is nothing - // to do but proceed to the throw below. + filterId = await Promise.race([ + provider.send("eth_subscribe", ["newHeads"]), + timeoutPromise, + ]); + } finally { + if (timeoutHandle) { + clearTimeout(timeoutHandle); + } } + } catch (err) { const message = err instanceof Error ? err.message : String(err); throw new Error( - `chain ${entry.chainId} (${entry.wssUrl}): RPC does not support eth_subscribe: ${message}`, + `chain ${entry.chainId} (${urlUsed}): RPC does not support eth_subscribe: ${message}`, ); } try { @@ -654,21 +816,23 @@ export class ChainProviderManager { } } entry.provider = null; + entry.activeWssUrl = null; entry.readyPromise = null; if (this.isDestroyed) { return; } - // Re-create. 
Any throw here propagates to the loop which handles - // backoff. - const provider = this.factory(entry.wssUrl); - await provider.ready; + // Re-create using the same primary-then-fallback walk as + // createProvider. Each (re)connect tries primary first so a primary + // that recovers is preferred. Any throw here propagates to the loop + // which handles backoff. + const { provider, urlUsed } = await this.openProvider(entry); - // Destroy may have run while we were waiting for `ready`. If so, the - // entry we are about to populate is no longer in `this.chains` and - // attaching listeners would leak a provider that never gets - // destroyed by the second pass. + // Destroy may have run while we were waiting for `ready` / probe. If + // so, the entry we are about to populate is no longer in + // `this.chains` and attaching listeners would leak a provider that + // never gets destroyed by the second pass. if (this.isDestroyed) { try { await provider.destroy(); @@ -678,14 +842,13 @@ export class ChainProviderManager { return; } - // Same probe as createProvider: confirm the rebuilt connection still - // accepts eth_subscribe before any .on("block") call exposes us to - // ethers' uncaught-rejection path. A failure here propagates out and - // the reconnect loop counts it as a failed attempt, falling back on - // exponential backoff. - await this.probeSubscriptionSupport(provider, entry); - entry.provider = provider; + entry.activeWssUrl = urlUsed; + if (urlUsed !== entry.wssUrl) { + logger.warn( + `[ChainProviderManager] chain=${entry.chainId} reconnected on fallback ${urlUsed}`, + ); + } // Successful reconnect clears any prior failure marker so /healthz // stops reporting a stale error on a now-healthy chain. 
entry.lastCreateError = null; diff --git a/keeperhub-events/event-tracker/src/listener/event-listener.ts b/keeperhub-events/event-tracker/src/listener/event-listener.ts index 74636ec1a..1d9bfba6b 100644 --- a/keeperhub-events/event-tracker/src/listener/event-listener.ts +++ b/keeperhub-events/event-tracker/src/listener/event-listener.ts @@ -29,6 +29,7 @@ export interface EventListenerOptions { workflowName: string; chainId: number; wssUrl: string; + fallbackWssUrl?: string; contractAddress: string; eventName: string; eventsAbiStrings: string[]; @@ -82,6 +83,7 @@ export class EventListener { this.unsubscribe = await this.opts.providerManager.subscribeToLogs({ chainId: this.opts.chainId, wssUrl: this.opts.wssUrl, + fallbackWssUrl: this.opts.fallbackWssUrl, address: this.opts.contractAddress, topic0: eventFragment.topicHash, handler: (log) => this.onLog(log), diff --git a/keeperhub-events/event-tracker/src/listener/registry.ts b/keeperhub-events/event-tracker/src/listener/registry.ts index 393325f1d..046107bc3 100644 --- a/keeperhub-events/event-tracker/src/listener/registry.ts +++ b/keeperhub-events/event-tracker/src/listener/registry.ts @@ -26,6 +26,13 @@ export interface WorkflowRegistration { workflowName: string; chainId: number; wssUrl: string; + /** + * Optional secondary WSS endpoint. If primary is unreachable or rejects + * `eth_subscribe`, ChainProviderManager falls through to this URL before + * giving up. Populated from `chains.default_fallback_wss` when it parses + * as a valid `ws://` / `wss://` URL. 
+ */ + fallbackWssUrl?: string; contractAddress: string; eventName: string; eventsAbiStrings: string[]; diff --git a/keeperhub-events/event-tracker/src/listener/workflow-mapper.ts b/keeperhub-events/event-tracker/src/listener/workflow-mapper.ts index efa973b9e..54be657c3 100644 --- a/keeperhub-events/event-tracker/src/listener/workflow-mapper.ts +++ b/keeperhub-events/event-tracker/src/listener/workflow-mapper.ts @@ -80,6 +80,25 @@ export function buildRegistration( return null; } + // Fallback is optional. Same nullable-DB-column caveat as primary: the + // type says `string` but rows can be null/empty. A bad fallback (wrong + // scheme, empty) is logged and dropped rather than failing the whole + // workflow; the listener still runs on primary alone. + const rawFallbackWssUrl: unknown = network.defaultFallbackWss; + let fallbackWssUrl: string | undefined; + if (typeof rawFallbackWssUrl === "string" && rawFallbackWssUrl.length > 0) { + if ( + rawFallbackWssUrl.startsWith("wss://") || + rawFallbackWssUrl.startsWith("ws://") + ) { + fallbackWssUrl = rawFallbackWssUrl; + } else { + logger.warn( + `[workflow-mapper] workflow ${workflowId} chain ${chainId} defaultFallbackWss is not a WebSocket URL ("${rawFallbackWssUrl}"); ignoring fallback`, + ); + } + } + const contractAddress = typeof config.contractAddress === "string" ? config.contractAddress : null; if (!contractAddress) { @@ -140,7 +159,8 @@ export function buildRegistration( userId, workflowName, chainId, - wssUrl: network.defaultPrimaryWss, + wssUrl, + fallbackWssUrl, contractAddress, eventName, eventsAbiStrings, @@ -167,6 +187,7 @@ export function hashRegistration( const canonical = JSON.stringify({ chainId: reg.chainId, wssUrl: reg.wssUrl, + fallbackWssUrl: reg.fallbackWssUrl ?? 
null, contractAddress: reg.contractAddress, eventName: reg.eventName, eventsAbiStrings: reg.eventsAbiStrings, diff --git a/keeperhub-events/event-tracker/tests/integration/provider-manager-bad-url.test.ts b/keeperhub-events/event-tracker/tests/integration/provider-manager-bad-url.test.ts new file mode 100644 index 000000000..9188c1c93 --- /dev/null +++ b/keeperhub-events/event-tracker/tests/integration/provider-manager-bad-url.test.ts @@ -0,0 +1,93 @@ +import { afterEach, beforeEach, describe, expect, it } from "vitest"; +import { ChainProviderManager } from "../../src/chains/provider-manager"; + +/** + * Live-network test for the failure modes that crashed the pod during + * KEEP-434 manual verification: a misconfigured WSS URL throwing at the + * underlying `ws` socket level (DNS NXDOMAIN, ECONNREFUSED, non-WS server) + * was leaking past `openProvider`'s try/catch and reaching + * `process.on("uncaughtException")` in `index.ts`, which exits the pod. + * + * Uses `ws://127.0.0.1:1` (port 1 is unused on Linux, ECONNREFUSED is + * synchronous and deterministic - no real DNS or remote network). + * + * Exercises the REAL `defaultFactory` path (no injected mock) because the + * bug is in the ws library's interaction with ethers' WebSocketProvider, + * which the unit-test MockProvider does not simulate. + */ +describe("ChainProviderManager (real ws): bad-URL safety", () => { + let onPermanentFailure: () => void; + let manager: ChainProviderManager; + + // Capture any uncaughtException that escapes during a test. Vitest + // installs its own listener for failure reporting; we record errors + // alongside it without removing it. + const capturedExceptions: unknown[] = []; + const listener = (err: unknown): void => { + capturedExceptions.push(err); + }; + + beforeEach(() => { + capturedExceptions.length = 0; + process.on("uncaughtException", listener); + onPermanentFailure = (): void => { + // No-op so reconnect exhaustion does not call process.exit during + // tests. 
Real prod uses defaultOnPermanentFailure which exits. + }; + manager = new ChainProviderManager({ onPermanentFailure }); + }); + + afterEach(async () => { + process.off("uncaughtException", listener); + await manager.destroy(); + }); + + it("rejects cleanly when the only URL is unreachable (ECONNREFUSED)", async () => { + // Pre-fix: this leaked `Error: connect ECONNREFUSED 127.0.0.1:1` + // to process.uncaughtException via ws's EventEmitter throw, the + // fatal handler in index.ts called process.exit(1), and K8s would + // crashloop the pod. Post-fix: the no-op error listener in + // defaultFactory keeps the ws emit harmless and the rejection + // surfaces through the awaited path. + await expect( + manager.getOrCreateProvider(31337, "ws://127.0.0.1:1"), + ).rejects.toThrow(/ws:\/\/127\.0\.0\.1:1/); + + expect(capturedExceptions).toEqual([]); + }); + + it("walks to the fallback when the primary is unreachable, surfacing both failures if both die", async () => { + // Both URLs unreachable - same crash path as above for each + // attempt. The aggregate error must mention both URLs and no + // uncaughtException is allowed. + await expect( + manager.getOrCreateProvider( + 31_338, + "ws://127.0.0.1:1", + "ws://127.0.0.1:2", + ), + ).rejects.toThrow(/ws:\/\/127\.0\.0\.1:1.*ws:\/\/127\.0\.0\.1:2/s); + + expect(capturedExceptions).toEqual([]); + }); + + it("rejects cleanly when the host does not resolve (DNS NXDOMAIN)", async () => { + // Different failure mode than ECONNREFUSED: the .invalid TLD is + // reserved per RFC 6761 and guaranteed to never resolve, so this + // exercises the dns.lookup error path (`getaddrinfo ENOTFOUND`). + // That was the original failure mode I hit during KEEP-434 manual + // verification - the error came from `node:dns` rather than + // `ws.ClientRequest`, but the same EventEmitter-throw rule made it + // crash the pod. 
The fix's defensive listener covers both because + // ws emits 'error' on the WebSocket regardless of which network + // layer surfaced the failure. + await expect( + manager.getOrCreateProvider( + 31_339, + "wss://does-not-exist-keep434.invalid/", + ), + ).rejects.toThrow(/does-not-exist-keep434\.invalid/); + + expect(capturedExceptions).toEqual([]); + }); +}); diff --git a/keeperhub-events/event-tracker/tests/unit/provider-manager-default-factory.test.ts b/keeperhub-events/event-tracker/tests/unit/provider-manager-default-factory.test.ts new file mode 100644 index 000000000..ae1933285 --- /dev/null +++ b/keeperhub-events/event-tracker/tests/unit/provider-manager-default-factory.test.ts @@ -0,0 +1,100 @@ +import { EventEmitter } from "node:events"; +import { ethers } from "ethers"; +import { afterEach, describe, expect, it } from "vitest"; + +/** + * Locks in the invariant that the bad-URL crash fix + * (provider-manager.ts:165 defaultFactory) relies on: + * + * `ethers.WebSocketProvider(creator)` does NOT remove or replace + * EventEmitter-style listeners attached to the underlying socket. ethers + * assigns to `socket.onerror` (a property), which coexists with + * `socket.on("error", ...)` listeners on the same EventEmitter. If a + * future ethers upgrade started calling `socket.removeAllListeners` + * inside `_start()`, our defensive listener would be stripped and the + * crash-on-bad-URL bug would resurface. + * + * Distinct from `provider-manager-bad-url.test.ts` which uses real + * network. This one is offline and runs as a fast unit check. 
+ */ + +class MockWsSocket extends EventEmitter { + url: string; + onerror: ((ev: unknown) => void) | null = null; + onopen: ((ev: unknown) => void) | null = null; + onclose: ((ev: unknown) => void) | null = null; + onmessage: ((ev: unknown) => void) | null = null; + + constructor(url: string) { + super(); + this.url = url; + } + send(_payload: string): void { + // not used in this test + } + close(_code?: number, _reason?: string): void { + // not used in this test + } +} + +describe("defaultFactory creator pattern: ethers compatibility", () => { + const providers: ethers.WebSocketProvider[] = []; + + afterEach(async () => { + while (providers.length) { + const p = providers.pop(); + if (p) { + await p.destroy().catch(() => undefined); + } + } + }); + + it("EventEmitter on('error') listener attached inside the creator survives ethers' WebSocketProvider construction", () => { + // Mirror what defaultFactory does: create a socket, attach the + // defensive `on("error", noop)` listener synchronously, then hand + // it to ethers via the creator overload. + const socket = new MockWsSocket("ws://test/"); + socket.on("error", () => { + // Mirrors the no-op in defaultFactory. + }); + expect(socket.listenerCount("error")).toBe(1); + + const provider = new ethers.WebSocketProvider( + () => socket as unknown as ethers.WebSocketLike, + ); + providers.push(provider); + + // The defensive listener must still be attached after ethers wires + // up its own onerror. ethers uses property assignment (independent + // of EventEmitter listener storage), so listenerCount stays >= 1. + // This is the load-bearing invariant: if it ever drops to 0, an + // early `error` event from the underlying ws would re-throw and + // crash the pod again. 
+ expect(socket.listenerCount("error")).toBeGreaterThanOrEqual(1); + }); + + it("ethers does not call removeAllListeners on the socket after construction", () => { + // Belt-and-suspenders: even if ethers attaches its own + // EventEmitter-style listener on top of ours, what matters is that + // it does not remove ours. Spy removeAllListeners and assert it is + // never called by ethers. + const socket = new MockWsSocket("ws://test/"); + let removeAllCalled = 0; + const orig = socket.removeAllListeners.bind(socket); + socket.removeAllListeners = ((event?: string | symbol) => { + removeAllCalled++; + return orig(event); + }) as typeof socket.removeAllListeners; + + socket.on("error", () => { + // defensive + }); + + const provider = new ethers.WebSocketProvider( + () => socket as unknown as ethers.WebSocketLike, + ); + providers.push(provider); + + expect(removeAllCalled).toBe(0); + }); +}); diff --git a/keeperhub-events/event-tracker/tests/unit/provider-manager.test.ts b/keeperhub-events/event-tracker/tests/unit/provider-manager.test.ts index 552484297..0c9541d1a 100644 --- a/keeperhub-events/event-tracker/tests/unit/provider-manager.test.ts +++ b/keeperhub-events/event-tracker/tests/unit/provider-manager.test.ts @@ -303,6 +303,65 @@ describe("ChainProviderManager", () => { await localManager.destroy(); }); }); + + // Fallback URL is opt-in via the optional third parameter on + // getOrCreateProvider / SubscribeOptions. When the primary fails at + // factory + ready + probe, the manager walks to the fallback before + // surfacing failure. Reconnect uses the same walk, so a primary that + // recovers is preferred on the next reconnect. 
+ describe("fallback wssUrl", () => { + it("uses the primary when it works and never invokes the fallback", async () => { + await manager.getOrCreateProvider(CHAIN_A, "ws://primary", "ws://fb"); + expect(factoryBundle.created).toHaveLength(1); + expect(manager.getHealth(CHAIN_A)?.wssUrl).toBe("ws://primary"); + expect(manager.getHealth(CHAIN_A)?.fallbackWssUrl).toBe("ws://fb"); + }); + + it("falls through to the fallback when the primary probe fails", async () => { + // One-shot: only the first provider created (i.e. the one for the + // primary URL) gets the subscribe failure. The fallback's fresh + // provider has no failure armed, so its probe succeeds. + factoryBundle.setNextSubscribeFailure( + new Error('unsupported operation (operation="eth_subscribe")'), + ); + await manager.getOrCreateProvider(CHAIN_A, "ws://primary", "ws://fb"); + expect(factoryBundle.created).toHaveLength(2); + // Failed primary provider is destroyed before we move on so the + // socket does not leak across the failover. + expect(factoryBundle.created[0].destroyed).toBe(true); + expect(factoryBundle.created[1].destroyed).toBe(false); + // Health surface reflects the active URL, not the configured + // primary, so operators can see failover at a glance. + expect(manager.getHealth(CHAIN_A)?.wssUrl).toBe("ws://fb"); + }); + + it("aggregates errors from both URLs when both fail", async () => { + // Persistent failure makes every factory call throw. Both primary + // and fallback fail before the call resolves, and the surfaced + // error must mention both URLs so operators can debug. + factoryBundle.setPersistentFailure(new Error("connect refused")); + await expect( + manager.getOrCreateProvider(CHAIN_A, "ws://primary", "ws://fb"), + ).rejects.toThrow(/ws:\/\/primary.*ws:\/\/fb/s); + }); + + it("rejects a mismatched fallback for a known chainId", async () => { + // The primary+fallback tuple is the entry's identity. 
A second + // caller with a different fallback would silently inherit the + // first caller's failover URL, so we throw instead. + await manager.getOrCreateProvider(CHAIN_A, "ws://primary", "ws://fb"); + await expect( + manager.getOrCreateProvider(CHAIN_A, "ws://primary", "ws://other"), + ).rejects.toThrow(/already registered/); + }); + + it("works without a fallback (single-URL backwards compatibility)", async () => { + // Existing call sites that pass no fallback still work and report + // null in the fallback health field. + await manager.getOrCreateProvider(CHAIN_A, "ws://primary"); + expect(manager.getHealth(CHAIN_A)?.fallbackWssUrl).toBeNull(); + }); + }); }); describe("subscribeToLogs block listener lifecycle", () => { @@ -683,6 +742,7 @@ describe("ChainProviderManager", () => { expect(h).toEqual({ chainId: CHAIN_A, wssUrl: "ws://a", + fallbackWssUrl: null, connected: true, reconnecting: false, lastBlockAt: null, @@ -1080,6 +1140,81 @@ describe("ChainProviderManager", () => { await vi.advanceTimersByTimeAsync(2_500); expect(manager.isHealthy(CHAIN_A)).toBe(true); }); + + it("walks to the fallback URL when the primary fails on reconnect", async () => { + // Reconnect runs the same primary-then-fallback walk as the + // initial createProvider. With one armed probe failure, the + // reconnect's primary attempt fails and openProvider falls + // through to the fallback. The active URL surfaces through + // getHealth so operators can see failover via /healthz mid-incident. + await manager.subscribeToLogs({ + chainId: CHAIN_A, + wssUrl: "ws://primary", + fallbackWssUrl: "ws://fb", + address: ADDR_A, + topic0: TOPIC_EMITTED, + handler: vi.fn(), + }); + expect(factoryBundle.created).toHaveLength(1); + expect(manager.getHealth(CHAIN_A)?.wssUrl).toBe("ws://primary"); + + // Arm a one-shot probe failure. The reconnect's primary attempt + // gets it; the fallback attempt that follows has no failure armed. 
+ factoryBundle.setNextSubscribeFailure( + new Error('unsupported operation (operation="eth_subscribe")'), + ); + factoryBundle.created[0].emitError(new Error("wss dropped")); + await vi.advanceTimersByTimeAsync(1_500); + + // openProvider tore down the failed primary attempt before + // moving on, then created a fresh mock for the fallback. + // Initial + primary attempt + fallback attempt = 3 providers. + expect(factoryBundle.created).toHaveLength(3); + expect(factoryBundle.created[1].destroyed).toBe(true); + expect(factoryBundle.created[2].destroyed).toBe(false); + expect(manager.isHealthy(CHAIN_A)).toBe(true); + expect(manager.getHealth(CHAIN_A)?.wssUrl).toBe("ws://fb"); + // Block listener and heartbeat must be re-attached on the + // fallback provider; otherwise events would be silently dropped + // after failover. + expect(factoryBundle.created[2].hasBlockHandler()).toBe(true); + expect(factoryBundle.created[2].hasErrorHandler()).toBe(true); + }); + + it("flips back to the primary on the next reconnect once the primary recovers", async () => { + // Running on the fallback is a degraded state, not a sticky one. + // When the primary recovers, the next reconnect's primary-first + // walk picks it up. Without this, a transient primary blip would + // strand the chain on the fallback until process restart. + await manager.subscribeToLogs({ + chainId: CHAIN_A, + wssUrl: "ws://primary", + fallbackWssUrl: "ws://fb", + address: ADDR_A, + topic0: TOPIC_EMITTED, + handler: vi.fn(), + }); + + // First reconnect: primary fails, manager runs on fallback. 
+ factoryBundle.setNextSubscribeFailure(new Error("transient")); + factoryBundle.created[0].emitError(new Error("drop")); + await vi.advanceTimersByTimeAsync(1_500); + expect(manager.getHealth(CHAIN_A)?.wssUrl).toBe("ws://fb"); + const fallbackProvider = factoryBundle.created.at(-1); + const createdBeforeSecond = factoryBundle.created.length; + + // Second reconnect: nothing armed, so the primary attempt + // succeeds and openProvider returns on the first URL it tries. + // The fallback URL is never even hit. + fallbackProvider?.emitError(new Error("drop 2")); + await vi.advanceTimersByTimeAsync(1_500); + + // Exactly one new provider for the recovered primary - no + // wasted fallback factory call. + expect(factoryBundle.created.length - createdBeforeSecond).toBe(1); + expect(manager.isHealthy(CHAIN_A)).toBe(true); + expect(manager.getHealth(CHAIN_A)?.wssUrl).toBe("ws://primary"); + }); }); describe("heartbeat", () => { diff --git a/keeperhub-events/event-tracker/tests/unit/workflow-mapper.test.ts b/keeperhub-events/event-tracker/tests/unit/workflow-mapper.test.ts index d3952ccc5..c8b0e5727 100644 --- a/keeperhub-events/event-tracker/tests/unit/workflow-mapper.test.ts +++ b/keeperhub-events/event-tracker/tests/unit/workflow-mapper.test.ts @@ -79,6 +79,7 @@ describe("buildRegistration", () => { workflowName: "Test Workflow", chainId: CHAIN_ID, wssUrl: "ws://localhost:8546", + fallbackWssUrl: "ws://localhost:8546", contractAddress: "0x1111111111111111111111111111111111111111", eventName: "Transfer", }); @@ -152,6 +153,21 @@ describe("buildRegistration", () => { expect(a?.configHash).not.toBe(b?.configHash); }); + it("changes when fallbackWssUrl changes", () => { + // Switching fallback providers is a real config change: the + // reconciler must restart the listener so the new URL becomes the + // entry's identity in the provider manager. 
+ const a = buildRegistration(makeWorkflow(), NETWORKS); + const networksB: NetworksMap = { + [CHAIN_ID]: { + ...NETWORK, + defaultFallbackWss: "wss://different-fallback.example.com", + }, + }; + const b = buildRegistration(makeWorkflow(), networksB); + expect(a?.configHash).not.toBe(b?.configHash); + }); + it("hashRegistration matches the hash in the built registration", () => { const reg = buildRegistration(makeWorkflow(), NETWORKS); expect(reg).not.toBeNull(); @@ -236,6 +252,54 @@ describe("buildRegistration", () => { expect(reg?.wssUrl).toBe("wss://eth-mainnet.example.com"); }); + // defaultFallbackWss has the same nullable-DB-column issue as the + // primary, but a bad fallback should NOT fail the whole workflow - the + // listener can still run on primary alone. + describe("defaultFallbackWss handling", () => { + it("includes fallbackWssUrl when valid", () => { + const networks: NetworksMap = { + [CHAIN_ID]: { + ...NETWORK, + defaultPrimaryWss: "wss://primary.example.com", + defaultFallbackWss: "wss://fallback.example.com", + }, + }; + const reg = buildRegistration(makeWorkflow(), networks); + expect(reg?.fallbackWssUrl).toBe("wss://fallback.example.com"); + }); + + it("drops a null fallback (workflow still runs on primary)", () => { + const networks: NetworksMap = { + [CHAIN_ID]: { + ...NETWORK, + defaultFallbackWss: null as unknown as string, + }, + }; + const reg = buildRegistration(makeWorkflow(), networks); + expect(reg).not.toBeNull(); + expect(reg?.fallbackWssUrl).toBeUndefined(); + }); + + it("drops an empty fallback", () => { + const networks: NetworksMap = { + [CHAIN_ID]: { ...NETWORK, defaultFallbackWss: "" }, + }; + const reg = buildRegistration(makeWorkflow(), networks); + expect(reg?.fallbackWssUrl).toBeUndefined(); + }); + + it("drops a fallback with the wrong scheme", () => { + const networks: NetworksMap = { + [CHAIN_ID]: { + ...NETWORK, + defaultFallbackWss: "https://eth-mainnet.example.com", + }, + }; + const reg = 
buildRegistration(makeWorkflow(), networks); + expect(reg?.fallbackWssUrl).toBeUndefined(); + }); + }); + it("returns null when contractAddress is missing", () => { expect( buildRegistration( diff --git a/keeperhub-events/pnpm-lock.yaml b/keeperhub-events/pnpm-lock.yaml index 5cda7f540..141916051 100644 --- a/keeperhub-events/pnpm-lock.yaml +++ b/keeperhub-events/pnpm-lock.yaml @@ -35,6 +35,9 @@ importers: uuid: specifier: ^14.0.0 version: 14.0.0 + ws: + specifier: ^8.17.1 + version: 8.17.1 devDependencies: '@types/node': specifier: ^24.12.2 @@ -42,6 +45,9 @@ importers: '@types/uuid': specifier: ^10.0.0 version: 10.0.0 + '@types/ws': + specifier: ^8.5.13 + version: 8.18.1 tsup: specifier: ^8.3.5 version: 8.5.1(postcss@8.5.10)(tsx@4.21.0)(typescript@5.9.3) @@ -847,6 +853,9 @@ packages: '@types/uuid@10.0.0': resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} + '@types/ws@8.18.1': + resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} + '@vitest/expect@4.1.2': resolution: {integrity: sha512-gbu+7B0YgUJ2nkdsRJrFFW6X7NTP44WlhiclHniUhxADQJH5Szt9mZ9hWnJPJ8YwOK5zUOSSlSvyzRf0u1DSBQ==} @@ -2319,6 +2328,10 @@ snapshots: '@types/uuid@10.0.0': {} + '@types/ws@8.18.1': + dependencies: + '@types/node': 24.12.2 + '@vitest/expect@4.1.2': dependencies: '@standard-schema/spec': 1.1.0 diff --git a/lib/agentic-wallet/workflow-binding.ts b/lib/agentic-wallet/workflow-binding.ts index ebc9903ed..3b6f082a3 100644 --- a/lib/agentic-wallet/workflow-binding.ts +++ b/lib/agentic-wallet/workflow-binding.ts @@ -33,6 +33,33 @@ * mismatch (defensive) rather than null (permissive) so we never silently * widen access on an unrecognised tag. 
* + * Fix-pack-5 (KEEP-432): the chain field on a listing is overloaded — for + * Base-data workflows the data chain and the payment chain happen to be the + * same ("base"/"8453"), so the registered chain doubles as the payment-chain + * pin. For workflows whose data chain is *not* a payment chain (Ethereum, + * Optimism, Polygon, Arbitrum), the listing's chain identifies WHERE THE + * CONTRACTS LIVE, not which chain payment must arrive on. Such listings now + * accept either Base x402 or Tempo MPP. The defensive-mismatch behaviour for + * unknown / unparseable tags is preserved — only the explicitly whitelisted + * data-chain ids in `KNOWN_DATA_CHAIN_IDS` widen the payment side. + * + * Security: even on data-chain listings the binding still server-derives + * payTo from the registry on the Base path, and the Tempo path still resolves + * the workflow's price for the daily-spend deduction. The fix-pack-3 N-1 + * concern (dual-chain victim, attacker chooses weaker chain) is unchanged for + * Base- and Tempo-pinned listings. For data-chain listings there is no + * payment-chain preference inherent to the workflow, so the pin doesn't apply. + * + * Note on creator degree-of-freedom: workflows.chain is a free-form text + * column written by the listing API (lib/mcp/listing.ts) without an input + * allowlist. A creator can intentionally tag a Base-only workflow with + * chain="1" to widen acceptance to either payment chain. This isn't a + * security boundary violation — payTo is still server-derived from the + * org's wallet so payers are paying the legit creator regardless — but it + * does mean which payment chains a listing accepts is author-controlled, + * not platform-enforced. Tighten at the listing API if that ever becomes + * a policy concern. + * * Lookup chain mirrors lib/x402/payment-gate.ts:resolveCreatorWallet. 
*/ import { and, eq } from "drizzle-orm"; @@ -75,36 +102,65 @@ const TEMPO_MAINNET_CHAIN_ID_STR = String(TEMPO_MAINNET_CHAIN_ID); const TEMPO_TESTNET_CHAIN_ID_STR = String(TEMPO_TESTNET_CHAIN_ID); /** - * Map any chain tag we accept on a workflow listing — slug ("base", - * "tempo") or numeric chain id ("8453", "4217", "4218") — into the - * canonical BindingChain form used by /sign callers. Returns null for - * unrecognised values so the caller can decide whether null means - * "permissive legacy" (when wf.chain itself is null) or "defensive - * mismatch" (when wf.chain is set but unparseable). + * Whitelisted data-chain ids — chains that KeeperHub workflows can READ from + * but that are NOT payment chains. Listings with one of these chains accept + * payment via either Base x402 or Tempo MPP. Mirrors the mainnet set declared + * in lib/rpc/rpc-config.ts; extend by editing this set when adding support + * for a new read-only chain. + * + * Decimal string form to match the format stored in workflows.chain. + */ +export const KNOWN_DATA_CHAIN_IDS = new Set([ + "1", // Ethereum mainnet + "42161", // Arbitrum One + "43114", // Avalanche C-Chain + "56", // BNB Chain + "137", // Polygon + "16661", // 0G Mainnet (Aristotle) + "9745", // Plasma Mainnet +]); + +type ChainClassification = + | { readonly kind: "payment"; readonly chain: BindingChain } + | { readonly kind: "data" } + | { readonly kind: "unrecognised" }; + +/** + * Classify a workflow.chain tag into one of three buckets: + * - "payment": a recognised payment-chain slug or chain id; the listing is + * pinned to that payment chain and the caller must match. + * - "data": a recognised data-chain id (Ethereum, OP, Polygon, Arbitrum); the + * listing's chain identifies where the contracts live, not which chain + * payment must arrive on. Either Base or Tempo payment is accepted. + * - "unrecognised": a non-empty value we can't parse. 
Treated as defensive + * mismatch by the binding so a typo or future tag never silently widens + * access. * - * Case-insensitive on slug input; whitespace-trimmed. Numeric forms - * are matched as decimal strings against the canonical constants in - * lib/agentic-wallet/constants.ts so a future chain-id rename only - * has to be done in one place. + * Case-insensitive on slug input; whitespace-trimmed. Numeric forms match + * the canonical constants in lib/agentic-wallet/constants.ts so a chain-id + * rename only happens in one place. */ -function normaliseChainTag( +function classifyChainTag( value: string | null | undefined -): BindingChain | null { +): ChainClassification { if (typeof value !== "string") { - return null; + return { kind: "unrecognised" }; } const v = value.trim().toLowerCase(); if (v === "base" || v === BASE_CHAIN_ID_STR) { - return "base"; + return { kind: "payment", chain: "base" }; } if ( v === "tempo" || v === TEMPO_MAINNET_CHAIN_ID_STR || v === TEMPO_TESTNET_CHAIN_ID_STR ) { - return "tempo"; + return { kind: "payment", chain: "tempo" }; + } + if (KNOWN_DATA_CHAIN_IDS.has(v)) { + return { kind: "data" }; } - return null; + return { kind: "unrecognised" }; } function priceToMicro( @@ -156,17 +212,21 @@ export async function verifyWorkflowBinding( }; } - // Fix-pack-3 N-1 + Fix-pack-4 (KEEP-391): reject requests whose - // caller-supplied chain does not match the workflow's registered chain, - // comparing on a normalised tag so listings stored with a numeric chain - // id ("8453", "4217", "4218") compare equal to the slug forms ("base", - // "tempo"). Null is permissive for legacy listings that pre-date the - // workflows.chain column. Unknown / unparseable values are treated as a - // mismatch (defensive) rather than null (permissive) so we never silently - // widen access on an unrecognised tag. + // Fix-pack-3 N-1 + Fix-pack-4 (KEEP-391) + Fix-pack-5 (KEEP-432): classify + // the workflow's chain tag into payment / data / unrecognised. 
Payment- + // chain listings stay pinned to that chain (the original cross-chain-proof + // defence). Data-chain listings (Ethereum, Arbitrum, Polygon, BNB, Avalanche, 0G, Plasma) only + // describe where the workflow READS contracts from — they have no inherent + // payment-chain preference, so either Base x402 or Tempo MPP is accepted. + // Unrecognised tags stay defensive (mismatch) so a typo never widens access. + // A null wf.chain remains permissive for legacy listings that pre-date the + // workflows.chain column. if (wf.chain) { - const wfNorm = normaliseChainTag(wf.chain); - if (wfNorm === null || wfNorm !== chain) { + const wfClass = classifyChainTag(wf.chain); + const matched = + wfClass.kind === "data" || + (wfClass.kind === "payment" && wfClass.chain === chain); + if (!matched) { return { ok: false, status: 403, diff --git a/lib/rpc/rpc-config.ts b/lib/rpc/rpc-config.ts index b638337cd..d6d15d501 100644 --- a/lib/rpc/rpc-config.ts +++ b/lib/rpc/rpc-config.ts @@ -43,6 +43,11 @@ export const PUBLIC_RPCS = { POLYGON_AMOY_FALLBACK: "https://polygon-amoy-bor-rpc.publicnode.com", ARBITRUM_MAINNET: "https://arb1.arbitrum.io/rpc", ARBITRUM_MAINNET_FALLBACK: "https://rpc.ankr.com/arbitrum", + // Optimism is not in CHAIN_CONFIG (no keeperhub-supported feature has + // needed it yet), but Superfluid runs there and the verify script + // imports this entry. Add a CHAIN_CONFIG entry alongside if/when + // keeperhub adds Optimism as a registered chain. + OPTIMISM_MAINNET: "https://mainnet.optimism.io", ARBITRUM_SEPOLIA: "https://sepolia-rollup.arbitrum.io/rpc", ARBITRUM_SEPOLIA_FALLBACK: "https://arbitrum-sepolia-rpc.publicnode.com", AVAX_MAINNET: "https://api.avax.network/ext/bc/C/rpc", @@ -459,7 +464,9 @@ export function getPrivateRpcUrl( export function getUsePrivateMempoolRpc( options: GetPrivateMempoolOptions ): boolean { - return options.rpcConfig[options.jsonKey]?.isPrivateMempoolRpcEnabled ?? 
false; + return ( + options.rpcConfig[options.jsonKey]?.isPrivateMempoolRpcEnabled ?? false + ); } /** diff --git a/lib/types/integration.ts b/lib/types/integration.ts index 039fd1904..9e394b99a 100755 --- a/lib/types/integration.ts +++ b/lib/types/integration.ts @@ -9,7 +9,7 @@ * 2. Add a system integration to SYSTEM_INTEGRATION_TYPES in discover-plugins.ts * 3. Run: pnpm discover-plugins * - * Generated types: aave-v3, aave-v4, aerodrome, ai-gateway, ajna, chainlink, chronicle, clerk, code, compound, cowswap, curve, database, discord, ethena, lido, linear, math, morpho, pendle, protocol, resend, rocket-pool, safe, sendgrid, sky, slack, spark, telegram, uniswap, v0, web3, webflow, webhook, wrapped, yearn + * Generated types: aave-v3, aave-v4, aerodrome, ai-gateway, ajna, chainlink, chronicle, clerk, code, compound, cowswap, curve, database, discord, ethena, lido, linear, math, morpho, pendle, protocol, resend, rocket-pool, safe, sendgrid, sky, slack, spark, superfluid, telegram, uniswap, v0, web3, webflow, webhook, wrapped, yearn */ // Integration type union - plugins + system integrations @@ -42,6 +42,7 @@ export type IntegrationType = | "sky" | "slack" | "spark" + | "superfluid" | "telegram" | "uniswap" | "v0" diff --git a/lib/workflow/executor/executor.workflow.ts b/lib/workflow/executor/executor.workflow.ts index ce57c28f8..62ad46f8c 100644 --- a/lib/workflow/executor/executor.workflow.ts +++ b/lib/workflow/executor/executor.workflow.ts @@ -547,20 +547,54 @@ function hasNestedDataShape( typeof data === "object" && data !== null && "data" in data && - typeof (data as Record).data === "object" + typeof (data as Record).data === "object" && + (data as Record).data !== null + ); +} + +// KEEP-442: code/run-code wraps the user's return value in `.result` (the +// step output is `{ success, result, logs }`). 
Without this fallback, a +// downstream string field referencing `{{@prep:Prep.url}}` (where `prep` +// is a code/run-code that returned `{ url }`) resolves to "" because +// `data.url` is undefined and the existing `.data` fallback only matches +// the HTTP-style wrapper shape. +function hasNestedResultShape( + data: unknown +): data is Record & { result: object } { + return ( + typeof data === "object" && + data !== null && + "result" in data && + typeof (data as Record).result === "object" && + (data as Record).result !== null ); } /** - * Resolve from output.data, or from output.data.data when step wraps body in .data (e.g. HTTP). + * Resolve a field path from output data, transparently unwrapping common + * step-result wrappers when the path doesn't match at the top level: + * - `{ data: ... }` (HTTP-style result) + * - `{ result: ... }` (code/run-code wrapper -- KEEP-442) */ -function resolveFromOutputData(data: unknown, fieldPath: string): unknown { +export function resolveFromOutputData( + data: unknown, + fieldPath: string +): unknown { const fromTop = fieldPath ? resolveConfigFieldPath(data, fieldPath) : data; if (fromTop !== undefined && fromTop !== null) { return fromTop; } if (hasNestedDataShape(data)) { const inner = data.data; + const fromInner = fieldPath + ? resolveConfigFieldPath(inner, fieldPath) + : inner; + if (fromInner !== undefined && fromInner !== null) { + return fromInner; + } + } + if (hasNestedResultShape(data)) { + const inner = data.result; return fieldPath ? resolveConfigFieldPath(inner, fieldPath) : inner; } return; @@ -631,7 +665,7 @@ function replaceConfigTemplate( * Process template variables in config. * Recurses into nested objects; supports array paths like data.recipes[0]. 
-function processTemplates(
+export function processTemplates(
   config: Record<string, unknown>,
   outputs: NodeOutputs
 ): Record<string, unknown> {
Both forwarders pick it up automatically + * via sameOnAllChains() because the addresses are deliberately constant across + * every chain Superfluid supports. To ship a new chain, also add an entry to + * tests/scripts/verify-superfluid-addresses.ts so the bytecode check covers it. + */ +export const SUPERFLUID_CHAIN_IDS = [ + "1", // Ethereum Mainnet + "10", // Optimism + "137", // Polygon + "8453", // Base + "42161", // Arbitrum One + "11155111", // Sepolia +] as const; + +/** + * Build the per-chain address map for a contract that's deployed at the same + * address on every chain in SUPERFLUID_CHAIN_IDS. Both forwarders use this -- + * Superfluid intentionally pins them to identical addresses cross-chain. + */ +function sameOnAllChains(address: string): Record { + return Object.fromEntries(SUPERFLUID_CHAIN_IDS.map((id) => [id, address])); +} + +const FLOW_RATE_HELP = + "Wei per second (int96). 1 USDCx/month is approximately 385,802,469,135 wei/s at 18 decimals. Computed: amount * 10^decimals / seconds."; + +const CREATE_FLOW_RATE_HELP = `${FLOW_RATE_HELP} Sender needs at least ~3 hours of stream value as a deposit; verify with get-super-token-balance before opening the stream.`; + +/** + * CFAv1Forwarder address. Pinned identical across every chain in + * SUPERFLUID_CHAIN_IDS by Superfluid's deployment design. 
+ */ +export const CFA_FORWARDER_ADDRESS = + "0xcfA132E353cB4E398080B9700609bb008eceB125"; + +const CFA_FORWARDER_ABI = JSON.stringify([ + { + type: "function", + name: "createFlow", + stateMutability: "nonpayable", + inputs: [ + { name: "token", type: "address" }, + { name: "sender", type: "address" }, + { name: "receiver", type: "address" }, + { name: "flowRate", type: "int96" }, + { name: "userData", type: "bytes" }, + ], + outputs: [{ name: "", type: "bool" }], + }, + { + type: "function", + name: "updateFlow", + stateMutability: "nonpayable", + inputs: [ + { name: "token", type: "address" }, + { name: "sender", type: "address" }, + { name: "receiver", type: "address" }, + { name: "flowRate", type: "int96" }, + { name: "userData", type: "bytes" }, + ], + outputs: [{ name: "", type: "bool" }], + }, + { + type: "function", + name: "deleteFlow", + stateMutability: "nonpayable", + inputs: [ + { name: "token", type: "address" }, + { name: "sender", type: "address" }, + { name: "receiver", type: "address" }, + { name: "userData", type: "bytes" }, + ], + outputs: [{ name: "", type: "bool" }], + }, + { + type: "function", + name: "getFlowInfo", + stateMutability: "view", + inputs: [ + { name: "token", type: "address" }, + { name: "sender", type: "address" }, + { name: "receiver", type: "address" }, + ], + outputs: [ + { name: "lastUpdated", type: "uint256" }, + { name: "flowRate", type: "int96" }, + { name: "deposit", type: "uint256" }, + { name: "owedDeposit", type: "uint256" }, + ], + }, + { + type: "function", + name: "getAccountFlowrate", + stateMutability: "view", + inputs: [ + { name: "token", type: "address" }, + { name: "account", type: "address" }, + ], + outputs: [{ name: "flowRate", type: "int96" }], + }, +]); + +/** + * GDAv1Forwarder address. Pinned identical across every chain in + * SUPERFLUID_CHAIN_IDS by Superfluid's deployment design. 
+ */ +export const GDA_FORWARDER_ADDRESS = + "0x6DA13Bde224A05a288748d857b9e7DDEffd1dE08"; + +const GDA_FORWARDER_ABI = JSON.stringify([ + { + type: "function", + name: "createPool", + stateMutability: "nonpayable", + inputs: [ + { name: "token", type: "address" }, + { name: "admin", type: "address" }, + { + name: "config", + type: "tuple", + components: [ + { name: "transferabilityForUnitsOwner", type: "bool" }, + { name: "distributionFromAnyAddress", type: "bool" }, + ], + }, + ], + outputs: [ + { name: "", type: "bool" }, + { name: "pool", type: "address" }, + ], + }, + { + type: "function", + name: "updateMemberUnits", + stateMutability: "nonpayable", + inputs: [ + { name: "pool", type: "address" }, + { name: "member", type: "address" }, + { name: "units", type: "uint128" }, + { name: "userData", type: "bytes" }, + ], + outputs: [{ name: "", type: "bool" }], + }, + { + type: "function", + name: "distribute", + stateMutability: "nonpayable", + inputs: [ + { name: "token", type: "address" }, + { name: "from", type: "address" }, + { name: "pool", type: "address" }, + { name: "amount", type: "uint256" }, + { name: "userData", type: "bytes" }, + ], + outputs: [{ name: "", type: "bool" }], + }, + { + type: "function", + name: "distributeFlow", + stateMutability: "nonpayable", + inputs: [ + { name: "token", type: "address" }, + { name: "from", type: "address" }, + { name: "pool", type: "address" }, + { name: "flowRate", type: "int96" }, + { name: "userData", type: "bytes" }, + ], + outputs: [{ name: "", type: "bool" }], + }, + { + type: "function", + name: "connectPool", + stateMutability: "nonpayable", + inputs: [ + { name: "pool", type: "address" }, + { name: "userData", type: "bytes" }, + ], + outputs: [{ name: "", type: "bool" }], + }, + { + type: "function", + name: "getNetFlow", + stateMutability: "view", + inputs: [ + { name: "token", type: "address" }, + { name: "account", type: "address" }, + ], + outputs: [{ name: "", type: "int96" }], + }, +]); + +const 
SUPER_TOKEN_ABI = JSON.stringify([ + { + type: "function", + name: "upgrade", + stateMutability: "nonpayable", + inputs: [{ name: "amount", type: "uint256" }], + outputs: [], + }, + { + type: "function", + name: "downgrade", + stateMutability: "nonpayable", + inputs: [{ name: "amount", type: "uint256" }], + outputs: [], + }, + { + type: "function", + name: "balanceOf", + stateMutability: "view", + inputs: [{ name: "account", type: "address" }], + outputs: [{ name: "", type: "uint256" }], + }, + { + type: "function", + name: "getUnderlyingToken", + stateMutability: "view", + inputs: [], + outputs: [{ name: "", type: "address" }], + }, + { + type: "function", + name: "updateFlowOperatorPermissions", + stateMutability: "nonpayable", + inputs: [ + { name: "flowOperator", type: "address" }, + { name: "permissions", type: "uint8" }, + { name: "flowRateAllowance", type: "int96" }, + ], + outputs: [{ name: "", type: "bool" }], + }, +]); + +export default defineProtocol({ + name: "Superfluid", + slug: "superfluid", + description: + "Programmable streaming payments -- open per-second money streams between addresses, distribute pro-rata to pool members, and wrap/unwrap SuperTokens", + website: "https://superfluid.org", + icon: "/protocols/superfluid.png", + + contracts: { + cfaForwarder: { + label: "Superfluid CFAv1 Forwarder", + addresses: sameOnAllChains(CFA_FORWARDER_ADDRESS), + abi: CFA_FORWARDER_ABI, + }, + gdaForwarder: { + label: "Superfluid GDAv1 Forwarder", + addresses: sameOnAllChains(GDA_FORWARDER_ADDRESS), + abi: GDA_FORWARDER_ABI, + }, + superToken: { + label: "Superfluid SuperToken", + addresses: {}, + abi: SUPER_TOKEN_ABI, + userSpecifiedAddress: true, + }, + }, + + actions: [ + { + slug: "create-flow", + label: "Open Money Stream", + description: + "Open a continuous wei/sec stream of a SuperToken from sender to receiver", + type: "write", + contract: "cfaForwarder", + function: "createFlow", + inputs: [ + { name: "token", type: "address", label: "SuperToken 
Address" }, + { name: "sender", type: "address", label: "Sender Address" }, + { name: "receiver", type: "address", label: "Receiver Address" }, + { + name: "flowRate", + type: "int96", + label: "Flow Rate (wei/sec)", + helpTip: CREATE_FLOW_RATE_HELP, + }, + { + name: "userData", + type: "bytes", + label: "User Data", + default: "0x", + advanced: true, + }, + ], + }, + { + slug: "update-flow", + label: "Update Stream Rate", + description: + "Change the wei/sec rate of an existing stream. Use delete-flow to close a stream instead of setting rate to 0.", + type: "write", + contract: "cfaForwarder", + function: "updateFlow", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "sender", type: "address", label: "Sender Address" }, + { name: "receiver", type: "address", label: "Receiver Address" }, + { + name: "flowRate", + type: "int96", + label: "New Flow Rate (wei/sec)", + helpTip: FLOW_RATE_HELP, + }, + { + name: "userData", + type: "bytes", + label: "User Data", + default: "0x", + advanced: true, + }, + ], + }, + { + slug: "delete-flow", + label: "Close Money Stream", + description: "Close an open stream between sender and receiver", + type: "write", + contract: "cfaForwarder", + function: "deleteFlow", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "sender", type: "address", label: "Sender Address" }, + { name: "receiver", type: "address", label: "Receiver Address" }, + { + name: "userData", + type: "bytes", + label: "User Data", + default: "0x", + advanced: true, + }, + ], + }, + { + slug: "get-flow", + label: "Read Flow Between Two Addresses", + description: + "Read the current flow rate, deposit, and last-updated timestamp for a stream between two addresses", + type: "read", + contract: "cfaForwarder", + function: "getFlowInfo", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "sender", type: "address", label: "Sender Address" }, + { name: 
"receiver", type: "address", label: "Receiver Address" }, + ], + outputs: [ + { + name: "lastUpdated", + type: "uint256", + label: "Last Updated (unix seconds)", + }, + { + name: "flowRate", + type: "int96", + label: "Flow Rate (wei/sec)", + }, + { + name: "deposit", + type: "uint256", + label: "Deposit (wei)", + decimals: 18, + }, + { + name: "owedDeposit", + type: "uint256", + label: "Owed Deposit (wei)", + decimals: 18, + }, + ], + }, + { + slug: "get-cfa-net-flow", + label: "Read CFA Net Flow Rate of an Address", + description: + "Read an address's net flow rate from CFA streams only (positive = net receiver, negative = net sender). Excludes GDA pool distributions -- use get-net-flow for the combined CFA+GDA reading.", + type: "read", + contract: "cfaForwarder", + function: "getAccountFlowrate", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "account", type: "address", label: "Account Address" }, + ], + outputs: [ + { + name: "flowRate", + type: "int96", + label: "CFA Net Flow Rate (wei/sec, signed)", + }, + ], + }, + { + slug: "get-net-flow", + label: "Read Net Flow Rate of an Address", + description: + "Read an address's net flow rate for a SuperToken, combining CFA streams and GDA pool distributions (positive = net receiver, negative = net sender). Use get-cfa-net-flow if you need CFA-only.", + type: "read", + contract: "gdaForwarder", + function: "getNetFlow", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "account", type: "address", label: "Account Address" }, + ], + outputs: [ + { + name: "flowRate", + type: "int96", + label: "Net Flow Rate (wei/sec, signed)", + }, + ], + }, + { + slug: "create-pool", + label: "Create Distribution Pool", + description: + "Create a GDA distribution pool with the supplied address as administrator. 
The new pool address is emitted in the PoolCreated event -- chain a web3.query-events call after this action filtered by the returned tx hash to capture it.", + type: "write", + contract: "gdaForwarder", + function: "createPool", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "admin", type: "address", label: "Pool Admin Address" }, + { + name: "transferabilityForUnitsOwner", + type: "bool", + label: "Transferability For Units Owner", + default: "false", + helpTip: + "If true, members can transfer their pool units to other addresses. Most pools leave this false.", + }, + { + name: "distributionFromAnyAddress", + type: "bool", + label: "Distribution From Any Address", + default: "false", + helpTip: + "If true, any address can call distribute/distributeFlow into this pool. If false, only the pool admin can. Most pools leave this false.", + }, + ], + }, + { + slug: "update-member-units", + label: "Set Member Units in a Pool", + description: + "Set a recipient's pro-rata share in a distribution pool. New members must call connect-pool from their own wallet before they receive distributions.", + type: "write", + contract: "gdaForwarder", + function: "updateMemberUnits", + inputs: [ + { name: "pool", type: "address", label: "Pool Address" }, + { name: "member", type: "address", label: "Member Address" }, + { name: "units", type: "uint128", label: "Units" }, + { + name: "userData", + type: "bytes", + label: "User Data", + default: "0x", + advanced: true, + }, + ], + }, + { + slug: "distribute", + label: "Instant Distribution to a Pool", + description: + "Push a one-shot distribution into a pool. 
Amount divides pro-rata across members by their unit share.", + type: "write", + contract: "gdaForwarder", + function: "distribute", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "from", type: "address", label: "Sender Address" }, + { name: "pool", type: "address", label: "Pool Address" }, + { name: "amount", type: "uint256", label: "Amount (wei)" }, + { + name: "userData", + type: "bytes", + label: "User Data", + default: "0x", + advanced: true, + }, + ], + }, + { + slug: "distribute-flow", + label: "Stream Into a Pool", + description: + "Open a continuous stream into a pool. Members receive their pro-rata share by the second; updating member units changes the split in real time.", + type: "write", + contract: "gdaForwarder", + function: "distributeFlow", + inputs: [ + { name: "token", type: "address", label: "SuperToken Address" }, + { name: "from", type: "address", label: "Sender Address" }, + { name: "pool", type: "address", label: "Pool Address" }, + { + name: "flowRate", + type: "int96", + label: "Flow Rate (wei/sec)", + helpTip: FLOW_RATE_HELP, + }, + { + name: "userData", + type: "bytes", + label: "User Data", + default: "0x", + advanced: true, + }, + ], + }, + { + slug: "connect-pool", + label: "Connect to a Pool (Member Opt-In)", + description: + "Members must call this from their own wallet to start receiving distributions. Without this, units exist but no money flows.", + type: "write", + contract: "gdaForwarder", + function: "connectPool", + inputs: [ + { name: "pool", type: "address", label: "Pool Address" }, + { + name: "userData", + type: "bytes", + label: "User Data", + default: "0x", + advanced: true, + }, + ], + }, + { + slug: "wrap", + label: "Wrap to SuperToken", + description: + "Wrap an underlying ERC-20 amount into its SuperToken. 
Requires a prior web3.approve-token call against the SuperToken address.", + type: "write", + contract: "superToken", + function: "upgrade", + inputs: [{ name: "amount", type: "uint256", label: "Amount (wei)" }], + }, + { + slug: "unwrap", + label: "Unwrap from SuperToken", + description: "Unwrap a SuperToken amount back to its underlying ERC-20", + type: "write", + contract: "superToken", + function: "downgrade", + inputs: [{ name: "amount", type: "uint256", label: "Amount (wei)" }], + }, + { + slug: "grant-flow-operator", + label: "Grant Flow-Operator Permissions", + description: + "Authorize another address to manage your flows of this SuperToken up to a wei/sec allowance", + type: "write", + contract: "superToken", + function: "updateFlowOperatorPermissions", + inputs: [ + { + name: "flowOperator", + type: "address", + label: "Flow Operator Address", + }, + { + name: "permissions", + type: "uint8", + label: "Permissions Bitmap", + helpTip: + "Bitmask: 1 = create, 2 = update, 4 = delete, 7 = all three. 
Combine via bitwise OR.", + }, + { + name: "flowRateAllowance", + type: "int96", + label: "Flow Rate Allowance (wei/sec)", + helpTip: FLOW_RATE_HELP, + }, + ], + }, + { + slug: "get-super-token-balance", + label: "Get SuperToken Balance", + description: "Read an address's current SuperToken balance", + type: "read", + contract: "superToken", + function: "balanceOf", + inputs: [{ name: "account", type: "address", label: "Account Address" }], + outputs: [ + { + name: "balance", + type: "uint256", + label: "Balance (wei)", + decimals: 18, + }, + ], + }, + { + slug: "get-underlying-token", + label: "Get Underlying ERC-20 Address", + description: + "Read the underlying ERC-20 address for this SuperToken (the token that gets escrowed when you wrap)", + type: "read", + contract: "superToken", + function: "getUnderlyingToken", + inputs: [], + outputs: [ + { + name: "underlying", + type: "address", + label: "Underlying ERC-20 Address", + }, + ], + }, + ], +}); diff --git a/public/protocols/superfluid.png b/public/protocols/superfluid.png new file mode 100644 index 000000000..9b5295e8b Binary files /dev/null and b/public/protocols/superfluid.png differ diff --git a/tests/integration/fixtures/superfluid-workflows/create-pool.json b/tests/integration/fixtures/superfluid-workflows/create-pool.json new file mode 100644 index 000000000..80542fdc4 --- /dev/null +++ b/tests/integration/fixtures/superfluid-workflows/create-pool.json @@ -0,0 +1,42 @@ +{ + "name": "Superfluid create-pool demo (KEEP-415 Fix 1)", + "description": "Real on-chain write: create a GDA pool on Sepolia. 
Exercises the flat (bool,bool) PoolConfig reshape end-to-end through the keeperhub workflow pipeline.", + "nodes": [ + { + "id": "trigger-1", + "type": "trigger", + "position": { "x": 0, "y": 0 }, + "data": { + "type": "trigger", + "label": "", + "config": { "triggerType": "Manual" }, + "status": "idle", + "description": "" + } + }, + { + "id": "step-1", + "type": "action", + "position": { "x": 272, "y": 0 }, + "data": { + "type": "action", + "label": "", + "config": { + "actionType": "superfluid/create-pool", + "network": "11155111", + "token": "0xb598E6C621618a9f63788816ffb50Ee2862D443B", + "admin": "0x42d92ec2c2cd8ba6ab5f65079e46975cbcdc63ef", + "transferabilityForUnitsOwner": "false", + "distributionFromAnyAddress": "false", + "integrationId": "t6xtd727ow2up79uy6v0y", + "_protocolMeta": "{\"protocolSlug\":\"superfluid\",\"contractKey\":\"gdaForwarder\",\"functionName\":\"createPool\",\"actionType\":\"write\"}" + }, + "status": "idle", + "description": "" + } + } + ], + "edges": [ + { "id": "e1", "source": "trigger-1", "target": "step-1" } + ] +} diff --git a/tests/integration/fixtures/superfluid-workflows/get-net-flow.json b/tests/integration/fixtures/superfluid-workflows/get-net-flow.json new file mode 100644 index 000000000..35080c176 --- /dev/null +++ b/tests/integration/fixtures/superfluid-workflows/get-net-flow.json @@ -0,0 +1,39 @@ +{ + "name": "Superfluid net-flow probe (KEEP-415 demo)", + "description": "Reads the combined CFA+GDA net flow rate on Sepolia fUSDCx. 
Exercises the new get-net-flow action introduced in this PR.", + "nodes": [ + { + "id": "trigger-1", + "type": "trigger", + "position": { "x": 0, "y": 0 }, + "data": { + "type": "trigger", + "label": "", + "config": { "triggerType": "Manual" }, + "status": "idle", + "description": "" + } + }, + { + "id": "step-1", + "type": "action", + "position": { "x": 272, "y": 0 }, + "data": { + "type": "action", + "label": "", + "config": { + "actionType": "superfluid/get-net-flow", + "network": "11155111", + "token": "0xb598E6C621618a9f63788816ffb50Ee2862D443B", + "account": "0x0000000000000000000000000000000000000001", + "_protocolMeta": "{\"protocolSlug\":\"superfluid\",\"contractKey\":\"gdaForwarder\",\"functionName\":\"getNetFlow\",\"actionType\":\"read\"}" + }, + "status": "idle", + "description": "" + } + } + ], + "edges": [ + { "id": "e1", "source": "trigger-1", "target": "step-1" } + ] +} diff --git a/tests/integration/fixtures/superfluid-workflows/grant-flow-operator-quirky.json b/tests/integration/fixtures/superfluid-workflows/grant-flow-operator-quirky.json new file mode 100644 index 000000000..7436c92b7 --- /dev/null +++ b/tests/integration/fixtures/superfluid-workflows/grant-flow-operator-quirky.json @@ -0,0 +1,42 @@ +{ + "name": "Superfluid grant-flow-operator demo (KEEP-415)", + "description": "On-chain write demo: grant a no-op flow-operator permission on fUSDCx. 
Exercises the full keeperhub -> Superfluid write pipeline.", + "nodes": [ + { + "id": "trigger-1", + "type": "trigger", + "position": { "x": 0, "y": 0 }, + "data": { + "type": "trigger", + "label": "", + "config": { "triggerType": "Manual" }, + "status": "idle", + "description": "" + } + }, + { + "id": "step-1", + "type": "action", + "position": { "x": 272, "y": 0 }, + "data": { + "type": "action", + "label": "", + "config": { + "actionType": "superfluid/grant-flow-operator", + "network": "11155111", + "contractAddress": "0xb598E6C621618a9f63788816ffb50Ee2862D443B", + "flowOperator": "0x0000000000000000000000000000000000000001", + "permissions": "1", + "flowRateAllowance": "0", + "integrationId": "t6xtd727ow2up79uy6v0y", + "_protocolMeta": "{\"protocolSlug\":\"superfluid\",\"contractKey\":\"superToken\",\"functionName\":\"updateFlowOperatorPermissions\",\"actionType\":\"write\"}" + }, + "status": "idle", + "description": "" + } + } + ], + "edges": [ + { "id": "e1", "source": "trigger-1", "target": "step-1" } + ] +} diff --git a/tests/integration/fixtures/superfluid-workflows/wrap.json b/tests/integration/fixtures/superfluid-workflows/wrap.json new file mode 100644 index 000000000..cb1135d75 --- /dev/null +++ b/tests/integration/fixtures/superfluid-workflows/wrap.json @@ -0,0 +1,60 @@ +{ + "name": "Superfluid wrap demo (KEEP-415)", + "description": "Two-step workflow: approve fUSDCx to spend fUSDC, then wrap 1 fUSDC into 1 fUSDCx. 
Demonstrates ERC20 -> SuperToken via the deployed runtime.", + "nodes": [ + { + "id": "trigger-1", + "type": "trigger", + "position": { "x": 0, "y": 0 }, + "data": { + "type": "trigger", + "label": "", + "config": { "triggerType": "Manual" }, + "status": "idle", + "description": "" + } + }, + { + "id": "step-approve", + "type": "action", + "position": { "x": 272, "y": 0 }, + "data": { + "type": "action", + "label": "", + "config": { + "actionType": "web3/approve-token", + "network": "11155111", + "tokenConfig": "{\"mode\":\"custom\",\"customToken\":{\"address\":\"0xe72f289584eDA2bE69Cfe487f4638F09bAc920Db\",\"symbol\":\"fUSDC\"}}", + "spenderAddress": "0xb598E6C621618a9f63788816ffb50Ee2862D443B", + "amount": "1", + "integrationId": "t6xtd727ow2up79uy6v0y" + }, + "status": "idle", + "description": "" + } + }, + { + "id": "step-wrap", + "type": "action", + "position": { "x": 544, "y": 0 }, + "data": { + "type": "action", + "label": "", + "config": { + "actionType": "superfluid/wrap", + "network": "11155111", + "contractAddress": "0xb598E6C621618a9f63788816ffb50Ee2862D443B", + "amount": "1000000000000000000", + "integrationId": "t6xtd727ow2up79uy6v0y", + "_protocolMeta": "{\"protocolSlug\":\"superfluid\",\"contractKey\":\"superToken\",\"functionName\":\"upgrade\",\"actionType\":\"write\"}" + }, + "status": "idle", + "description": "" + } + } + ], + "edges": [ + { "id": "e1", "source": "trigger-1", "target": "step-approve" }, + { "id": "e2", "source": "step-approve", "target": "step-wrap" } + ] +} diff --git a/tests/integration/protocol-superfluid-onchain.test.ts b/tests/integration/protocol-superfluid-onchain.test.ts new file mode 100644 index 000000000..c98786349 --- /dev/null +++ b/tests/integration/protocol-superfluid-onchain.test.ts @@ -0,0 +1,380 @@ +/** + * Superfluid On-Chain Integration Tests + * + * Verifies that the Superfluid protocol definition produces valid calldata + * the deployed CFA forwarder, GDA forwarder, and SuperToken contracts + * accept on 
Sepolia. Catches contract dispatch and ABI-shape mistakes the + * unit-test layer cannot see. + * + * Coverage: every action declared by the protocol gets at least one + * dispatch test (read decodes, write encodes without ABI errors). + * + * Gated on INTEGRATION_TEST_RPC_URL env var - skipped in CI without it. + */ + +import { ethers } from "ethers"; +import { beforeAll, describe, expect, it, vi } from "vitest"; + +// `lib/rpc/providers` transitively imports `lib/safe-fetch` (via the +// safe-ethers adapter), which declares `import "server-only"` and would +// otherwise throw under vitest's Node runtime. +vi.mock("server-only", () => ({})); + +import { coerceArgsForAbi, reshapeArgsForAbi } from "@/lib/abi/struct-args"; +import type { + ProtocolAction, + ProtocolContract, + ProtocolDefinition, +} from "@/lib/protocol-registry"; +import { getRpcProviderFromUrls } from "@/lib/rpc/provider-factory"; +import type { RpcProviderManager } from "@/lib/rpc/providers"; +import { getRpcUrlByChainId } from "@/lib/rpc/rpc-config"; +import superfluidDef, { + CFA_FORWARDER_ADDRESS, + GDA_FORWARDER_ADDRESS, +} from "@/protocols/superfluid"; + +const RPC_URL = process.env.INTEGRATION_TEST_RPC_URL; +const CHAIN_ID = "11155111"; +const SEPOLIA_CHAIN_ID = 11_155_111; +const TEST_ADDRESS = "0x0000000000000000000000000000000000000001"; + +// fUSDCx on Sepolia. The forwarders validate the token argument against the +// Superfluid host registry and revert for unknown addresses, so reads need +// a real SuperToken. fUSDCx is the canonical Sepolia test token; an account +// with no flows returns 0, which is exactly what we want for assertions. +const SEPOLIA_FUSDCX = "0xb598E6C621618a9f63788816ffb50Ee2862D443B"; +// Underlying fUSDC for fUSDCx; getUnderlyingToken should return this +// (proves we're decoding the right slot, not just any address). +const SEPOLIA_FUSDC = "0xe72f289584eDA2bE69Cfe487f4638F09bAc920Db"; + +// Common dummy values for write-action inputs. 
estimateGas will revert +// with business reverts for most of these (insufficient balance, no flow +// to update, etc.) -- that is fine; we only assert the failure mode is +// not an ABI/encoding error. +const DUMMY_AMOUNT_WEI = "1000000000000000000"; // 1e18 +const DUMMY_FLOW_RATE = "1000000"; // wei/sec, small but non-zero +const DUMMY_UNITS = "1"; +const DUMMY_PERMISSIONS_ALL = "7"; // create+update+delete bitmap +const DUMMY_BYTES = "0x"; + +// Markers we treat as failures: ABI/calldata mistakes the test should catch. +// Anything else (require(false), insufficient balance, etc.) is a business +// revert -- expected when calling write ops from an unfunded test address. +const ENCODING_ERROR_RE = /INVALID_ARGUMENT|could not decode|invalid function/; + +function buildCalldata( + protocol: ProtocolDefinition, + actionSlug: string, + sampleInputs: Record, + contractAddressOverride?: string +): { + to: string; + data: string; + action: ProtocolAction; + contract: ProtocolContract; +} { + const action = protocol.actions.find((a) => a.slug === actionSlug); + if (!action) { + throw new Error(`Action ${actionSlug} not found`); + } + + const contract = protocol.contracts[action.contract]; + if (!contract.abi) { + throw new Error(`Contract ${action.contract} has no ABI`); + } + + const contractAddress = + contractAddressOverride ?? contract.addresses[CHAIN_ID]; + if (!contractAddress) { + throw new Error( + `Contract ${action.contract} not on chain ${CHAIN_ID} and no override given` + ); + } + + const rawArgs = action.inputs.map( + (inp) => sampleInputs[inp.name] ?? inp.default ?? "" + ); + + const abi = JSON.parse(contract.abi); + const functionAbi = abi.find( + (f: { name: string; type: string }) => + f.type === "function" && f.name === action.function + ); + // Reproduce the production pipeline: reshape flat args into tuples per + // ABI, then coerce stringly-typed leaves (bool "false" -> false) before + // encoding. 
Same order as plugins/web3/steps/write-contract-core.ts. + const reshaped = reshapeArgsForAbi(rawArgs, functionAbi); + const args = coerceArgsForAbi(reshaped, functionAbi); + const iface = new ethers.Interface(abi); + const data = iface.encodeFunctionData(action.function, args); + + return { to: contractAddress, data, action, contract }; +} + +describe.skipIf(!RPC_URL)("Superfluid on-chain integration", () => { + let manager: RpcProviderManager; + + beforeAll(async () => { + if (!RPC_URL) { + return; + } + manager = await getRpcProviderFromUrls( + RPC_URL, + getRpcUrlByChainId(SEPOLIA_CHAIN_ID, "fallback"), + SEPOLIA_CHAIN_ID, + "sepolia" + ); + }); + + // -- helpers ------------------------------------------------------------- + + async function callAndDecode( + slug: string, + inputs: Record, + contractAddressOverride?: string + ): Promise<{ + decoded: ethers.Result; + contract: ProtocolContract; + action: ProtocolAction; + to: string; + }> { + const { to, data, contract, action } = buildCalldata( + superfluidDef, + slug, + inputs, + contractAddressOverride + ); + const result = await manager.executeWithFailover((p) => + p.call({ to, data }) + ); + const abi = JSON.parse(contract.abi as string); + const iface = new ethers.Interface(abi); + const decoded = iface.decodeFunctionResult(action.function, result); + return { decoded, contract, action, to }; + } + + // Returns the error message from estimateGas, or "" if it succeeded. The + // test then asserts the message doesn't contain ABI-error markers. 
+ async function estimateGasError( + slug: string, + inputs: Record, + contractAddressOverride?: string + ): Promise { + const { to, data } = buildCalldata( + superfluidDef, + slug, + inputs, + contractAddressOverride + ); + try { + await manager.executeWithFailover((p) => + p.estimateGas({ to, data, from: TEST_ADDRESS }) + ); + return ""; + } catch (error) { + return String(error); + } + } + + // -- CFA reads ----------------------------------------------------------- + + it("get-flow: returns the four expected CFA flow-info outputs", async () => { + const { decoded, to } = await callAndDecode("get-flow", { + token: SEPOLIA_FUSDCX, + sender: TEST_ADDRESS, + receiver: TEST_ADDRESS, + }); + expect(to).toBe(CFA_FORWARDER_ADDRESS); + expect(decoded).toHaveLength(4); + }, 15_000); + + it("get-cfa-net-flow: dispatches to cfaForwarder.getAccountFlowrate", async () => { + const { decoded, to } = await callAndDecode("get-cfa-net-flow", { + token: SEPOLIA_FUSDCX, + account: TEST_ADDRESS, + }); + expect(to).toBe(CFA_FORWARDER_ADDRESS); + expect(typeof decoded[0]).toBe("bigint"); + }, 15_000); + + // -- GDA reads ----------------------------------------------------------- + + it("get-net-flow: dispatches to gdaForwarder.getNetFlow (combined CFA+GDA)", async () => { + const { decoded, to } = await callAndDecode("get-net-flow", { + token: SEPOLIA_FUSDCX, + account: TEST_ADDRESS, + }); + expect(to).toBe(GDA_FORWARDER_ADDRESS); + expect(typeof decoded[0]).toBe("bigint"); + }, 15_000); + + // -- SuperToken reads (userSpecifiedAddress) ----------------------------- + + it("get-super-token-balance: dispatches to the user-supplied SuperToken", async () => { + const { decoded } = await callAndDecode( + "get-super-token-balance", + { account: TEST_ADDRESS }, + SEPOLIA_FUSDCX + ); + expect(typeof decoded[0]).toBe("bigint"); + }, 15_000); + + it("get-underlying-token: returns the fUSDC underlying for fUSDCx", async () => { + const { decoded } = await callAndDecode( + 
"get-underlying-token", + {}, + SEPOLIA_FUSDCX + ); + expect((decoded[0] as string).toLowerCase()).toBe( + SEPOLIA_FUSDC.toLowerCase() + ); + }, 15_000); + + // -- CFA writes ---------------------------------------------------------- + + it("create-flow: encodes against cfaForwarder.createFlow", async () => { + const msg = await estimateGasError("create-flow", { + token: SEPOLIA_FUSDCX, + sender: TEST_ADDRESS, + receiver: TEST_ADDRESS, + flowRate: DUMMY_FLOW_RATE, + userData: DUMMY_BYTES, + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("update-flow: encodes against cfaForwarder.updateFlow", async () => { + const msg = await estimateGasError("update-flow", { + token: SEPOLIA_FUSDCX, + sender: TEST_ADDRESS, + receiver: TEST_ADDRESS, + flowRate: DUMMY_FLOW_RATE, + userData: DUMMY_BYTES, + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("delete-flow: encodes against cfaForwarder.deleteFlow", async () => { + const msg = await estimateGasError("delete-flow", { + token: SEPOLIA_FUSDCX, + sender: TEST_ADDRESS, + receiver: TEST_ADDRESS, + userData: DUMMY_BYTES, + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + // -- GDA writes ---------------------------------------------------------- + + it("create-pool: flat bool inputs reshape into the (bool,bool) PoolConfig tuple", async () => { + const msg = await estimateGasError("create-pool", { + token: SEPOLIA_FUSDCX, + admin: TEST_ADDRESS, + transferabilityForUnitsOwner: "false", + distributionFromAnyAddress: "false", + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("update-member-units: encodes against gdaForwarder.updateMemberUnits", async () => { + const msg = await estimateGasError("update-member-units", { + pool: TEST_ADDRESS, + member: TEST_ADDRESS, + units: DUMMY_UNITS, + userData: DUMMY_BYTES, + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("distribute: encodes against gdaForwarder.distribute", async () => { 
+ const msg = await estimateGasError("distribute", { + token: SEPOLIA_FUSDCX, + from: TEST_ADDRESS, + pool: TEST_ADDRESS, + amount: DUMMY_AMOUNT_WEI, + userData: DUMMY_BYTES, + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("distribute-flow: encodes int96 flowRate against gdaForwarder.distributeFlow", async () => { + const msg = await estimateGasError("distribute-flow", { + token: SEPOLIA_FUSDCX, + from: TEST_ADDRESS, + pool: TEST_ADDRESS, + flowRate: DUMMY_FLOW_RATE, + userData: DUMMY_BYTES, + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("connect-pool: encodes against gdaForwarder.connectPool", async () => { + const msg = await estimateGasError("connect-pool", { + pool: TEST_ADDRESS, + userData: DUMMY_BYTES, + }); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + // -- SuperToken writes (userSpecifiedAddress) ---------------------------- + + it("wrap: encodes uint256 amount against superToken.upgrade", async () => { + const msg = await estimateGasError( + "wrap", + { amount: DUMMY_AMOUNT_WEI }, + SEPOLIA_FUSDCX + ); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("unwrap: encodes uint256 amount against superToken.downgrade", async () => { + const msg = await estimateGasError( + "unwrap", + { amount: DUMMY_AMOUNT_WEI }, + SEPOLIA_FUSDCX + ); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + it("grant-flow-operator: encodes (address, uint8, int96) against superToken.updateFlowOperatorPermissions", async () => { + const msg = await estimateGasError( + "grant-flow-operator", + { + flowOperator: TEST_ADDRESS, + permissions: DUMMY_PERMISSIONS_ALL, + flowRateAllowance: DUMMY_FLOW_RATE, + }, + SEPOLIA_FUSDCX + ); + expect(msg).not.toMatch(ENCODING_ERROR_RE); + }, 15_000); + + // -- Coverage check ------------------------------------------------------ + + it("every declared action has at least one dispatch test in this file", () => { + const declared = new 
Set(superfluidDef.actions.map((a) => a.slug)); + const tested = new Set([ + "get-flow", + "get-cfa-net-flow", + "get-net-flow", + "get-super-token-balance", + "get-underlying-token", + "create-flow", + "update-flow", + "delete-flow", + "create-pool", + "update-member-units", + "distribute", + "distribute-flow", + "connect-pool", + "wrap", + "unwrap", + "grant-flow-operator", + ]); + const missing = [...declared].filter((s) => !tested.has(s)); + const stale = [...tested].filter((s) => !declared.has(s)); + expect(missing).toEqual([]); + expect(stale).toEqual([]); + }); +}); diff --git a/tests/integration/protocol-superfluid-workflow-fixtures.test.ts b/tests/integration/protocol-superfluid-workflow-fixtures.test.ts new file mode 100644 index 000000000..084db175c --- /dev/null +++ b/tests/integration/protocol-superfluid-workflow-fixtures.test.ts @@ -0,0 +1,134 @@ +/** + * Superfluid Workflow Fixture Validation + * + * The fixtures in `fixtures/superfluid-workflows/` are the canonical + * workflow JSONs used to verify Superfluid actions end-to-end against a + * deployed PR environment. They double as a regression set: if the + * protocol drifts (action removed, contract key renamed, chain dropped), + * these fixtures should fail to validate against the live registry. + * + * This test does NOT make network calls. It only checks that each + * fixture's references agree with `protocols/superfluid.ts`. Runs in CI + * unconditionally. 
+ */ + +import fs from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; +import { describe, expect, it } from "vitest"; +import superfluidDef, { SUPERFLUID_CHAIN_IDS } from "@/protocols/superfluid"; + +const FIXTURE_DIR = path.join( + path.dirname(fileURLToPath(import.meta.url)), + "fixtures/superfluid-workflows" +); +const SUPPORTED_CHAINS: readonly string[] = SUPERFLUID_CHAIN_IDS; + +type WorkflowConfig = { + actionType?: string; + network?: string; + _protocolMeta?: string; + [key: string]: unknown; +}; + +type WorkflowNode = { + id: string; + type: "trigger" | "action"; + data: { config: WorkflowConfig }; +}; + +type Workflow = { + name: string; + nodes: WorkflowNode[]; + edges: Array<{ id: string; source: string; target: string }>; +}; + +type ProtocolMeta = { + protocolSlug: string; + contractKey: string; + functionName: string; + actionType: "read" | "write"; +}; + +function loadFixtures(): Array<{ file: string; workflow: Workflow }> { + const files = fs + .readdirSync(FIXTURE_DIR) + .filter((f) => f.endsWith(".json")) + .sort(); + return files.map((file) => { + const raw = fs.readFileSync(path.join(FIXTURE_DIR, file), "utf-8"); + return { file, workflow: JSON.parse(raw) as Workflow }; + }); +} + +const fixtures = loadFixtures(); +const SUPERFLUID_PREFIX = "superfluid/"; + +describe("Superfluid workflow fixtures", () => { + it("fixture directory contains at least one JSON", () => { + expect(fixtures.length).toBeGreaterThan(0); + }); + + for (const { file, workflow } of fixtures) { + describe(file, () => { + it("has a name and at least one trigger plus one action node", () => { + expect(workflow.name).toBeTruthy(); + expect(workflow.nodes.some((n) => n.type === "trigger")).toBe(true); + expect(workflow.nodes.some((n) => n.type === "action")).toBe(true); + }); + + const superfluidActions = workflow.nodes.filter( + (n) => + n.type === "action" && + typeof n.data.config.actionType === "string" && + 
n.data.config.actionType.startsWith(SUPERFLUID_PREFIX) + ); + + for (const node of superfluidActions) { + const config = node.data.config; + const slug = (config.actionType as string).slice( + SUPERFLUID_PREFIX.length + ); + + describe(`action node ${node.id} (${slug})`, () => { + const action = superfluidDef.actions.find((a) => a.slug === slug); + + it("references a declared Superfluid action", () => { + expect(action).toBeDefined(); + }); + + it("network is a Superfluid-supported chain ID", () => { + expect(config.network).toBeTypeOf("string"); + expect(SUPPORTED_CHAINS).toContain(config.network); + }); + + it("_protocolMeta parses and matches the action definition", () => { + if (!action) { + return; + } + expect(typeof config._protocolMeta).toBe("string"); + const meta = JSON.parse( + config._protocolMeta as string + ) as ProtocolMeta; + expect(meta.protocolSlug).toBe("superfluid"); + expect(meta.contractKey).toBe(action.contract); + expect(meta.functionName).toBe(action.function); + expect(meta.actionType).toBe(action.type); + }); + + it("required input fields are all present in config", () => { + if (!action) { + return; + } + const requiredInputs = action.inputs.filter( + (inp) => inp.required ?? 
inp.default === undefined + ); + for (const inp of requiredInputs) { + expect(config).toHaveProperty(inp.name); + } + }); + }); + } + }); + } +}); diff --git a/tests/unit/agentic-wallet-workflow-binding.test.ts b/tests/unit/agentic-wallet-workflow-binding.test.ts index a690f6a3d..94e0436ce 100644 --- a/tests/unit/agentic-wallet-workflow-binding.test.ts +++ b/tests/unit/agentic-wallet-workflow-binding.test.ts @@ -59,7 +59,7 @@ vi.mock("@/lib/db/schema", () => ({ organizationWallets: { _table: "para_wallets" }, })); -const { verifyWorkflowBinding } = await import( +const { verifyWorkflowBinding, KNOWN_DATA_CHAIN_IDS } = await import( "@/lib/agentic-wallet/workflow-binding" ); @@ -291,10 +291,10 @@ describe("verifyWorkflowBinding", () => { }); it("rejects an unrecognised wf.chain tag (defensive — no silent widening)", async () => { - // wf.chain is a non-null string we cannot normalise. Treat as - // mismatch rather than falling through to permissive null branch, - // so a typo or future chain stored as "ethereum" can never pass - // a stolen-HMAC attacker's request through. + // wf.chain is a non-null string we cannot classify (slug form, not in + // the data-chain whitelist, not a payment chain). Treat as mismatch + // rather than falling through to permissive null branch, so a typo or + // future chain stored as "ethereum" / "9999" can never pass through. 
queueWorkflow({ chain: "ethereum" }); const rBase = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); expect(rBase).toMatchObject({ @@ -303,7 +303,7 @@ describe("verifyWorkflowBinding", () => { code: "CHAIN_MISMATCH", }); - queueWorkflow({ chain: "1" }); + queueWorkflow({ chain: "9999" }); const rTempo = await verifyWorkflowBinding(SLUG, "tempo", "", "0"); expect(rTempo).toMatchObject({ ok: false, @@ -312,4 +312,205 @@ describe("verifyWorkflowBinding", () => { }); }); }); + + // KEEP-432 (Fix-pack-5): listings whose chain identifies a data chain + // (Ethereum, Optimism, Polygon, Arbitrum) have no inherent payment-chain + // preference. Either Base x402 or Tempo MPP must be accepted, otherwise + // priced cross-chain-data workflows are unreachable from the wallet. + describe("data-chain listings (KEEP-432)", () => { + it("accepts Base payment for an Ethereum-data listing (chain=1)", async () => { + queueWorkflow({ chain: "1" }); + queueWallet({}); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r.ok).toBe(true); + }); + + it("accepts Tempo payment for an Ethereum-data listing (chain=1)", async () => { + queueWorkflow({ chain: "1" }); + queueWallet({}); + const r = await verifyWorkflowBinding(SLUG, "tempo", "", "0"); + expect(r.ok).toBe(true); + }); + + it("accepts either payment chain for every whitelisted data-chain listing", async () => { + // Sourced directly from production to eliminate manual-sync drift — + // adding a chain id to KNOWN_DATA_CHAIN_IDS now extends test coverage + // automatically. Skip "1" because it's covered by the explicit + // Ethereum tests above. 
+ for (const dataChain of KNOWN_DATA_CHAIN_IDS) { + if (dataChain === "1") { + continue; + } + + queueWorkflow({ chain: dataChain }); + queueWallet({}); + const rBase = await verifyWorkflowBinding( + SLUG, + "base", + CREATOR, + "50000" + ); + expect(rBase.ok).toBe(true); + + queueWorkflow({ chain: dataChain }); + queueWallet({}); + const rTempo = await verifyWorkflowBinding(SLUG, "tempo", "", "0"); + expect(rTempo.ok).toBe(true); + } + }); + + it("still enforces payTo equality on the Base path for a data-chain listing", async () => { + queueWorkflow({ chain: "1" }); + queueWallet({}); + const r = await verifyWorkflowBinding(SLUG, "base", ATTACKER, "50000"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: "PAYTO_MISMATCH", + }); + }); + + it("still enforces amount equality on the Base path for a data-chain listing", async () => { + queueWorkflow({ chain: "1" }); + queueWallet({}); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "100000"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: "AMOUNT_MISMATCH", + }); + }); + + it("rejects non-integer amount on Base for a data-chain listing", async () => { + queueWorkflow({ chain: "1" }); + queueWallet({}); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "abc"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: "AMOUNT_MISMATCH", + }); + }); + + it("compares payTo case-insensitively on a data-chain listing", async () => { + queueWorkflow({ chain: "1" }); + queueWallet({ walletAddress: CREATOR.toUpperCase() }); + const r = await verifyWorkflowBinding( + SLUG, + "base", + CREATOR.toLowerCase(), + "50000" + ); + expect(r.ok).toBe(true); + }); + + it("returns 403 WORKFLOW_NOT_PAYABLE for a data-chain listing without an active wallet", async () => { + queueWorkflow({ chain: "1" }); + queueWallet(null); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: 
"WORKFLOW_NOT_PAYABLE", + }); + }); + }); + + // KEEP-432 negative-set integrity: lock in the strict-decimal classifier + // semantics so a future producer change (hex serialisation, leading-zero + // normalisation, etc.) can't silently break existing listings or widen + // acceptance to a chain that was meant to reject. + describe("classifier strictness (KEEP-432 negative set)", () => { + it("rejects whitespace-only wf.chain as unrecognised", async () => { + // " " is truthy so reaches classifyChainTag, then trims to "" which + // doesn't match any known tag. + queueWorkflow({ chain: " " }); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: "CHAIN_MISMATCH", + }); + }); + + it("rejects leading-zero numeric ids ('08453', '01')", async () => { + queueWorkflow({ chain: "08453" }); + const r1 = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r1).toMatchObject({ ok: false, code: "CHAIN_MISMATCH" }); + + queueWorkflow({ chain: "01" }); + const r2 = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r2).toMatchObject({ ok: false, code: "CHAIN_MISMATCH" }); + }); + + it("rejects 0x-prefixed hex chain ids", async () => { + queueWorkflow({ chain: "0x1" }); + const r1 = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r1).toMatchObject({ ok: false, code: "CHAIN_MISMATCH" }); + + queueWorkflow({ chain: "0x2105" }); // 8453 in hex + const r2 = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r2).toMatchObject({ ok: false, code: "CHAIN_MISMATCH" }); + }); + + it("rejects float / decimal chain forms ('8453.0')", async () => { + queueWorkflow({ chain: "8453.0" }); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r).toMatchObject({ ok: false, code: "CHAIN_MISMATCH" }); + }); + + it("rejects testnet ids that aren't on the mainnet whitelist", async () => { + // Mainnet-only by 
intent. If KeeperHub starts supporting testnet + // listings, extend KNOWN_DATA_CHAIN_IDS and update this test. + const testnetIds = [ + "11155111", // Sepolia + "421614", // Arbitrum Sepolia + "80002", // Polygon Amoy + "43113", // Avalanche Fuji + "9746", // Plasma testnet + "16602", // 0G Galileo testnet + ]; + for (const t of testnetIds) { + queueWorkflow({ chain: t }); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r).toMatchObject({ ok: false, code: "CHAIN_MISMATCH" }); + } + }); + }); + + // KEEP-432 symmetric cross-chain-proof tests: the original Fix-pack-3 N-1 + // defence is bidirectional. The KEEP-391 test suite covers Base-pinned + + // tempo-caller; these cover the inverse so a future code change that + // flips the equality direction is caught. + describe("payment-chain pin is symmetric (KEEP-432)", () => { + it("rejects tempo-pinned listing with base caller", async () => { + queueWorkflow({ chain: "tempo" }); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: "CHAIN_MISMATCH", + }); + }); + + it("rejects Tempo-mainnet-id listing with base caller", async () => { + queueWorkflow({ chain: "4217" }); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: "CHAIN_MISMATCH", + }); + }); + + it("rejects Tempo-testnet-id listing with base caller", async () => { + queueWorkflow({ chain: "4218" }); + const r = await verifyWorkflowBinding(SLUG, "base", CREATOR, "50000"); + expect(r).toMatchObject({ + ok: false, + status: 403, + code: "CHAIN_MISMATCH", + }); + }); + }); }); diff --git a/tests/unit/http-request-template-substitution.test.ts b/tests/unit/http-request-template-substitution.test.ts new file mode 100644 index 000000000..ea5ad94cf --- /dev/null +++ b/tests/unit/http-request-template-substitution.test.ts @@ -0,0 +1,186 @@ +/** + * KEEP-442: HTTP Request 
system action's `endpoint`, `httpHeaders`, and + * `httpBody` string fields go through the workflow template substitution + * layer (`processTemplates`) like every other config string. The bug was + * that templates resolved to "" when referencing a `code/run-code` + * upstream because the resolver did not unwrap the run-code wrapper + * shape `{ success, result, logs }` -- only the HTTP-style `{ data: ... }` + * wrapper. Adding a `.result` fallback to `resolveFromOutputData` makes + * `{{@runCodeNode:Label.fieldInsideResult}}` resolve correctly and is + * what unblocks the dynamic Bridge Route Optimizer / MEV-Aware Swap + * Quote workflows in the catalog roadmap. + */ +import { describe, expect, it, vi } from "vitest"; + +vi.mock("server-only", () => ({})); + +import { + processTemplates, + resolveFromOutputData, +} from "@/lib/workflow/executor/executor.workflow"; + +type NodeOutputs = Record; + +// Shape that runCodeStep returns when user code does `return { url, ... }`. +const RUN_CODE_OUTPUT = { + success: true, + result: { + url: "https://app.across.to/api/suggested-fees?inputToken=0xA0b&outputToken=0x833&originChainId=1&destinationChainId=8453&amount=1000000", + headers: { "X-Trace": "abc" }, + body: { hello: "world" }, + }, + logs: [], +}; + +const HTTP_STEP_OUTPUT = { + success: true, + data: { + items: [{ name: "First" }, { name: "Second" }], + nextCursor: "cur_42", + }, + status: 200, +}; + +describe("KEEP-442: HTTP Request template substitution from run-code outputs", () => { + describe("resolveFromOutputData", () => { + it("resolves a top-level field directly", () => { + const data = { url: "https://example.com" }; + expect(resolveFromOutputData(data, "url")).toBe("https://example.com"); + }); + + it("unwraps the HTTP-style { data: ... 
} wrapper for downstream lookups", () => { + expect(resolveFromOutputData(HTTP_STEP_OUTPUT, "items[0].name")).toBe( + "First" + ); + expect(resolveFromOutputData(HTTP_STEP_OUTPUT, "nextCursor")).toBe( + "cur_42" + ); + }); + + it("unwraps the run-code-style { result: ... } wrapper -- the KEEP-442 fix", () => { + expect(resolveFromOutputData(RUN_CODE_OUTPUT, "url")).toBe( + RUN_CODE_OUTPUT.result.url + ); + expect(resolveFromOutputData(RUN_CODE_OUTPUT, "headers.X-Trace")).toBe( + "abc" + ); + expect(resolveFromOutputData(RUN_CODE_OUTPUT, "body.hello")).toBe( + "world" + ); + }); + + it("still honours an explicit `result.` prefix (back-compat)", () => { + expect(resolveFromOutputData(RUN_CODE_OUTPUT, "result.url")).toBe( + RUN_CODE_OUTPUT.result.url + ); + }); + + it("returns undefined when the field path is not found in any wrapper", () => { + expect(resolveFromOutputData(RUN_CODE_OUTPUT, "missing.deep")).toBe( + undefined + ); + }); + + it("prefers top-level over the .result fallback when both have the same key", () => { + const data = { url: "TOP", result: { url: "INNER" } }; + expect(resolveFromOutputData(data, "url")).toBe("TOP"); + }); + + it("prefers .data over .result when both wrappers exist", () => { + const data = { + success: true, + data: { url: "FROM_DATA" }, + result: { url: "FROM_RESULT" }, + }; + expect(resolveFromOutputData(data, "url")).toBe("FROM_DATA"); + }); + + // Documents the intentional fall-through behavior when both wrappers exist + // but the path is missing inside .data: continue searching .result rather + // than stopping. Useful for hypothetical step outputs that carry both + // wrappers; safe today because no shipping step does. 
+ it("falls through from .data to .result when the path is absent inside .data", () => { + const data = { + success: true, + data: { other: "x" }, + result: { url: "FROM_RESULT" }, + }; + expect(resolveFromOutputData(data, "url")).toBe("FROM_RESULT"); + }); + + it("ignores .data when the wrapper value is null (still tries .result)", () => { + const data = { + success: true, + data: null, + result: { url: "FROM_RESULT" }, + }; + expect(resolveFromOutputData(data, "url")).toBe("FROM_RESULT"); + }); + + it("ignores a null .result wrapper (does not throw)", () => { + const data = { success: true, result: null }; + expect(resolveFromOutputData(data, "url")).toBe(undefined); + }); + + // Pre-existing behavior we are pinning, not changing: a primitive + // result (e.g. `return "https://x"` from code/run-code) is rejected by + // hasNestedResultShape because the fallback can only walk into objects. + // The whole-output reference (no field path) returns the wrapper + // because top-level lookup succeeds. 
+ it("does not unwrap a primitive .result; whole-output ref still works", () => { + const data = { success: true, result: "https://primitive", logs: [] }; + expect(resolveFromOutputData(data, "anything")).toBe(undefined); + expect(resolveFromOutputData(data, "")).toEqual(data); + }); + }); + + describe("processTemplates -- HTTP Request fields with run-code upstream", () => { + const outputs: NodeOutputs = { + prep: { label: "Prep", data: RUN_CODE_OUTPUT }, + }; + + it("resolves {{@prep:Prep.url}} in the endpoint field (KEEP-442 repro)", () => { + const config = { + actionType: "HTTP Request", + httpMethod: "GET", + endpoint: "{{@prep:Prep.url}}", + httpHeaders: "{}", + httpBody: "{}", + }; + const processed = processTemplates(config, outputs); + expect(processed.endpoint).toBe(RUN_CODE_OUTPUT.result.url); + }); + + it("resolves templates embedded in httpHeaders and httpBody JSON strings", () => { + const config = { + actionType: "HTTP Request", + httpMethod: "POST", + endpoint: "https://api.example.com/x", + httpHeaders: '{"X-Trace": "{{@prep:Prep.headers.X-Trace}}"}', + httpBody: '{"greeting": "{{@prep:Prep.body.hello}}"}', + }; + const processed = processTemplates(config, outputs); + expect(processed.httpHeaders).toBe('{"X-Trace": "abc"}'); + expect(processed.httpBody).toBe('{"greeting": "world"}'); + }); + + it("resolves templates referencing an HTTP upstream (existing flow stays intact)", () => { + const httpOutputs: NodeOutputs = { + api: { label: "API", data: HTTP_STEP_OUTPUT }, + }; + const config = { + endpoint: "https://x.test/page?cursor={{@api:API.nextCursor}}", + }; + const processed = processTemplates(config, httpOutputs); + expect(processed.endpoint).toBe("https://x.test/page?cursor=cur_42"); + }); + + it("falls back to '' when the referenced node is not in outputs", () => { + const config = { + endpoint: "{{@missing:Whatever.url}}", + }; + const processed = processTemplates(config, outputs); + expect(processed.endpoint).toBe(""); + }); + }); +}); 
diff --git a/tests/unit/superfluid-protocol.test.ts b/tests/unit/superfluid-protocol.test.ts new file mode 100644 index 000000000..1e39f2254 --- /dev/null +++ b/tests/unit/superfluid-protocol.test.ts @@ -0,0 +1,485 @@ +import { describe, expect, it } from "vitest"; +import { reshapeArgsForAbi } from "@/lib/abi/struct-args"; +import superfluidProtocol, { + CFA_FORWARDER_ADDRESS, + GDA_FORWARDER_ADDRESS, + SUPERFLUID_CHAIN_IDS, +} from "@/protocols/superfluid"; + +const ADDRESS_REGEX = /^0x[0-9a-fA-F]{40}$/; +const KEBAB_CASE_REGEX = /^[a-z][a-z0-9]*(-[a-z0-9]+)*$/; +const CFA_MENTION_REGEX = /CFA/; +const GDA_MENTION_REGEX = /GDA/; +const EXPECTED_CHAINS: string[] = [...SUPERFLUID_CHAIN_IDS]; + +const CFA_FORWARDER = CFA_FORWARDER_ADDRESS; + +type SuperfluidAction = (typeof superfluidProtocol.actions)[number]; + +const findAction = (slug: string): SuperfluidAction | undefined => + superfluidProtocol.actions.find((a) => a.slug === slug); + +describe("Superfluid protocol", () => { + describe("metadata", () => { + it("declares the expected name, slug, and description", () => { + expect(superfluidProtocol.name).toBe("Superfluid"); + expect(superfluidProtocol.slug).toBe("superfluid"); + expect(superfluidProtocol.description).toBeTruthy(); + expect(superfluidProtocol.website).toBe("https://superfluid.org"); + }); + }); + + describe("cfaForwarder contract", () => { + it("declares cfaForwarder with the same address on all six chains", () => { + const contract = superfluidProtocol.contracts.cfaForwarder; + expect(contract).toBeDefined(); + expect(Object.keys(contract.addresses).sort()).toEqual( + [...EXPECTED_CHAINS].sort() + ); + const unique = new Set(Object.values(contract.addresses)); + expect(unique.size).toBe(1); + expect([...unique][0]).toBe(CFA_FORWARDER); + for (const addr of Object.values(contract.addresses)) { + expect(addr).toMatch(ADDRESS_REGEX); + } + }); + + it("ships an inline ABI containing the 5 expected functions", () => { + const contract = 
superfluidProtocol.contracts.cfaForwarder; + expect(contract.abi).toBeTruthy(); + const abi = JSON.parse(contract.abi as string) as Array<{ + type: string; + name?: string; + }>; + const fnNames = abi + .filter((f) => f.type === "function") + .map((f) => f.name) + .sort(); + expect(fnNames).toEqual( + [ + "createFlow", + "deleteFlow", + "getAccountFlowrate", + "getFlowInfo", + "updateFlow", + ].sort() + ); + }); + }); + + describe("create-flow action", () => { + it("is declared as a write action against cfaForwarder.createFlow", () => { + const action = findAction("create-flow"); + expect(action).toBeDefined(); + expect(action?.type).toBe("write"); + expect(action?.contract).toBe("cfaForwarder"); + expect(action?.function).toBe("createFlow"); + expect(action?.slug).toMatch(KEBAB_CASE_REGEX); + }); + + it("has the five expected inputs in order", () => { + const action = findAction("create-flow"); + const names = action?.inputs.map((i) => i.name); + expect(names).toEqual([ + "token", + "sender", + "receiver", + "flowRate", + "userData", + ]); + }); + + it("marks userData as advanced with default 0x", () => { + const action = findAction("create-flow"); + const userData = action?.inputs.find((i) => i.name === "userData"); + expect(userData?.advanced).toBe(true); + expect(userData?.default).toBe("0x"); + }); + + it("includes the int96 helpTip on flowRate", () => { + const action = findAction("create-flow"); + const flowRate = action?.inputs.find((i) => i.name === "flowRate"); + expect(flowRate?.helpTip).toContain("Wei per second"); + expect(flowRate?.helpTip).toContain("int96"); + }); + }); + + describe("update-flow action", () => { + it("is declared as a write action against cfaForwarder.updateFlow", () => { + const action = findAction("update-flow"); + expect(action).toBeDefined(); + expect(action?.type).toBe("write"); + expect(action?.contract).toBe("cfaForwarder"); + expect(action?.function).toBe("updateFlow"); + }); + + it("has the same input shape as create-flow", 
() => { + const action = findAction("update-flow"); + const names = action?.inputs.map((i) => i.name); + expect(names).toEqual([ + "token", + "sender", + "receiver", + "flowRate", + "userData", + ]); + }); + }); + + describe("delete-flow action", () => { + it("is declared as a write action against cfaForwarder.deleteFlow", () => { + const action = findAction("delete-flow"); + expect(action).toBeDefined(); + expect(action?.type).toBe("write"); + expect(action?.contract).toBe("cfaForwarder"); + expect(action?.function).toBe("deleteFlow"); + }); + + it("has token/sender/receiver/userData inputs", () => { + const action = findAction("delete-flow"); + const names = action?.inputs.map((i) => i.name); + expect(names).toEqual(["token", "sender", "receiver", "userData"]); + }); + }); + + describe("get-flow action", () => { + it("is declared as a read action against cfaForwarder.getFlowInfo", () => { + const action = findAction("get-flow"); + expect(action).toBeDefined(); + expect(action?.type).toBe("read"); + expect(action?.contract).toBe("cfaForwarder"); + expect(action?.function).toBe("getFlowInfo"); + }); + + it("declares the four expected outputs, decimals only on token-amount fields", () => { + const action = findAction("get-flow"); + const outputs = action?.outputs ?? 
[]; + expect(outputs.map((o) => o.name)).toEqual([ + "lastUpdated", + "flowRate", + "deposit", + "owedDeposit", + ]); + expect( + outputs.find((o) => o.name === "flowRate")?.decimals + ).toBeUndefined(); + expect(outputs.find((o) => o.name === "deposit")?.decimals).toBe(18); + expect(outputs.find((o) => o.name === "owedDeposit")?.decimals).toBe(18); + }); + }); + + describe("get-cfa-net-flow action", () => { + it("is declared as a read action against cfaForwarder.getAccountFlowrate", () => { + const action = findAction("get-cfa-net-flow"); + expect(action).toBeDefined(); + expect(action?.type).toBe("read"); + expect(action?.contract).toBe("cfaForwarder"); + expect(action?.function).toBe("getAccountFlowrate"); + }); + + it("returns flowRate as int96 without decimals (rate, not token amount)", () => { + const action = findAction("get-cfa-net-flow"); + const out = action?.outputs?.[0]; + expect(out?.name).toBe("flowRate"); + expect(out?.type).toBe("int96"); + expect(out?.decimals).toBeUndefined(); + }); + + it("description points users to get-net-flow for combined readings", () => { + const action = findAction("get-cfa-net-flow"); + expect(action?.description).toContain("get-net-flow"); + }); + }); + + describe("get-net-flow action", () => { + it("is declared as a read action against gdaForwarder.getNetFlow", () => { + const action = findAction("get-net-flow"); + expect(action).toBeDefined(); + expect(action?.type).toBe("read"); + expect(action?.contract).toBe("gdaForwarder"); + expect(action?.function).toBe("getNetFlow"); + }); + + it("returns flowRate as int96 without decimals (rate, not token amount)", () => { + const action = findAction("get-net-flow"); + const out = action?.outputs?.[0]; + expect(out?.name).toBe("flowRate"); + expect(out?.type).toBe("int96"); + expect(out?.decimals).toBeUndefined(); + }); + + it("description signals it covers both CFA and GDA flows", () => { + const action = findAction("get-net-flow"); + const desc = action?.description ?? 
""; + expect(desc).toMatch(CFA_MENTION_REGEX); + expect(desc).toMatch(GDA_MENTION_REGEX); + }); + }); + + describe("gdaForwarder contract", () => { + const GDA_FORWARDER = GDA_FORWARDER_ADDRESS; + + it("declares gdaForwarder with the same address on all six chains", () => { + const contract = superfluidProtocol.contracts.gdaForwarder; + expect(contract).toBeDefined(); + expect(Object.keys(contract.addresses).sort()).toEqual( + [...EXPECTED_CHAINS].sort() + ); + const unique = new Set(Object.values(contract.addresses)); + expect(unique.size).toBe(1); + expect([...unique][0]).toBe(GDA_FORWARDER); + }); + + it("ships an inline ABI with the 6 expected functions", () => { + const contract = superfluidProtocol.contracts.gdaForwarder; + const abi = JSON.parse(contract.abi as string) as Array<{ + type: string; + name?: string; + }>; + const fnNames = abi + .filter((f) => f.type === "function") + .map((f) => f.name) + .sort(); + expect(fnNames).toEqual( + [ + "connectPool", + "createPool", + "distribute", + "distributeFlow", + "getNetFlow", + "updateMemberUnits", + ].sort() + ); + }); + + it("createPool ABI declares the (bool,bool) PoolConfig tuple", () => { + const contract = superfluidProtocol.contracts.gdaForwarder; + const abi = JSON.parse(contract.abi as string) as Array<{ + type: string; + name?: string; + inputs?: Array<{ + name: string; + type: string; + components?: Array<{ name: string; type: string }>; + }>; + }>; + const createPool = abi.find( + (f) => f.type === "function" && f.name === "createPool" + ); + const config = createPool?.inputs?.find((i) => i.name === "config"); + expect(config?.type).toBe("tuple"); + expect(config?.components?.map((c) => c.name)).toEqual([ + "transferabilityForUnitsOwner", + "distributionFromAnyAddress", + ]); + }); + }); + + describe("GDA actions", () => { + it("declares the six expected GDA action slugs", () => { + const slugs = superfluidProtocol.actions + .filter((a) => a.contract === "gdaForwarder") + .map((a) => a.slug) + 
.sort(); + expect(slugs).toEqual( + [ + "connect-pool", + "create-pool", + "distribute", + "distribute-flow", + "get-net-flow", + "update-member-units", + ].sort() + ); + }); + + it("create-pool flattens the PoolConfig tuple into two top-level bool inputs", () => { + const action = findAction("create-pool"); + const inputNames = action?.inputs.map((i) => i.name); + expect(inputNames).toEqual([ + "token", + "admin", + "transferabilityForUnitsOwner", + "distributionFromAnyAddress", + ]); + const transferability = action?.inputs.find( + (i) => i.name === "transferabilityForUnitsOwner" + ); + const distribution = action?.inputs.find( + (i) => i.name === "distributionFromAnyAddress" + ); + expect(transferability?.type).toBe("bool"); + expect(distribution?.type).toBe("bool"); + }); + + it("create-pool flat args reshape into the (bool,bool) tuple expected by the ABI", () => { + const contract = superfluidProtocol.contracts.gdaForwarder; + const abi = JSON.parse(contract.abi as string) as Array<{ + type: string; + name?: string; + inputs?: Array<{ + name: string; + type: string; + components?: Array<{ name: string; type: string }>; + }>; + }>; + const createPoolAbi = abi.find( + (f) => f.type === "function" && f.name === "createPool" + ); + expect(createPoolAbi).toBeDefined(); + + const flatArgs = [ + "0x0000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000002", + true, + false, + ]; + const reshaped = reshapeArgsForAbi(flatArgs, { + inputs: createPoolAbi?.inputs, + }); + expect(reshaped).toEqual([ + "0x0000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000002", + { + transferabilityForUnitsOwner: true, + distributionFromAnyAddress: false, + }, + ]); + }); + + it("distribute-flow uses int96 flowRate with the shared helpTip", () => { + const action = findAction("distribute-flow"); + const flowRate = action?.inputs.find((i) => i.name === "flowRate"); + expect(flowRate?.type).toBe("int96"); + 
expect(flowRate?.helpTip).toContain("Wei per second"); + }); + + it("connect-pool documents that members must call from their own wallet", () => { + const action = findAction("connect-pool"); + expect(action?.description.toLowerCase()).toContain("own wallet"); + }); + }); + + describe("superToken contract", () => { + it("declares superToken with userSpecifiedAddress: true", () => { + const contract = superfluidProtocol.contracts.superToken; + expect(contract).toBeDefined(); + expect(contract.userSpecifiedAddress).toBe(true); + }); + + it("ships an inline ABI with the 5 expected functions", () => { + const contract = superfluidProtocol.contracts.superToken; + const abi = JSON.parse(contract.abi as string) as Array<{ + type: string; + name?: string; + }>; + const fnNames = abi + .filter((f) => f.type === "function") + .map((f) => f.name) + .sort(); + expect(fnNames).toEqual( + [ + "balanceOf", + "downgrade", + "getUnderlyingToken", + "updateFlowOperatorPermissions", + "upgrade", + ].sort() + ); + }); + }); + + describe("SuperToken actions", () => { + it("declares the five expected SuperToken action slugs", () => { + const slugs = superfluidProtocol.actions + .filter((a) => a.contract === "superToken") + .map((a) => a.slug) + .sort(); + expect(slugs).toEqual( + [ + "get-super-token-balance", + "get-underlying-token", + "grant-flow-operator", + "unwrap", + "wrap", + ].sort() + ); + }); + + it("wrap is a write action against superToken.upgrade", () => { + const action = findAction("wrap"); + expect(action?.type).toBe("write"); + expect(action?.contract).toBe("superToken"); + expect(action?.function).toBe("upgrade"); + }); + + it("unwrap is a write action against superToken.downgrade", () => { + const action = findAction("unwrap"); + expect(action?.type).toBe("write"); + expect(action?.function).toBe("downgrade"); + }); + + it("grant-flow-operator includes the bitmap helpTip on permissions", () => { + const action = findAction("grant-flow-operator"); + const perms = 
action?.inputs.find((i) => i.name === "permissions"); + expect(perms?.type).toBe("uint8"); + expect(perms?.helpTip).toContain("1"); + expect(perms?.helpTip).toContain("2"); + expect(perms?.helpTip).toContain("4"); + expect(perms?.helpTip).toContain("7"); + }); + + it("get-super-token-balance returns balance with decimals: 18", () => { + const action = findAction("get-super-token-balance"); + expect(action?.type).toBe("read"); + expect(action?.outputs?.[0]?.decimals).toBe(18); + }); + + it("get-underlying-token returns an address output and takes no inputs", () => { + const action = findAction("get-underlying-token"); + expect(action?.type).toBe("read"); + expect(action?.inputs).toEqual([]); + expect(action?.outputs?.[0]?.type).toBe("address"); + }); + }); + + describe("overall integrity", () => { + it("declares 16 actions in total", () => { + expect(superfluidProtocol.actions).toHaveLength(16); + }); + + it("every action slug is unique kebab-case", () => { + const slugs = superfluidProtocol.actions.map((a) => a.slug); + expect(new Set(slugs).size).toBe(slugs.length); + for (const slug of slugs) { + expect(slug).toMatch(KEBAB_CASE_REGEX); + } + }); + + it("every action references a defined contract", () => { + const contractKeys = new Set(Object.keys(superfluidProtocol.contracts)); + for (const action of superfluidProtocol.actions) { + expect(contractKeys.has(action.contract)).toBe(true); + } + }); + + it("every action's function exists in its contract's ABI", () => { + for (const action of superfluidProtocol.actions) { + const contract = + superfluidProtocol.contracts[ + action.contract as keyof typeof superfluidProtocol.contracts + ]; + const abi = contract.abi + ? (JSON.parse(contract.abi) as Array<{ + type: string; + name?: string; + }>) + : []; + const fnNames = abi + .filter((f) => f.type === "function") + .map((f) => f.name); + expect(fnNames).toContain(action.function); + } + }); + }); +});