diff --git a/packages/engine/src/__tests__/expressions/cache.test.ts b/packages/engine/src/__tests__/expressions/cache.test.ts new file mode 100644 index 0000000..d89c5e9 --- /dev/null +++ b/packages/engine/src/__tests__/expressions/cache.test.ts @@ -0,0 +1,113 @@ +import { describe, it, expect } from 'vitest' +import { LRUCache } from '../../expressions/cache.js' + +describe('LRUCache', () => { + it('stores and retrieves values', () => { + const cache = new LRUCache(10) + cache.set('a', 1) + cache.set('b', 2) + expect(cache.get('a')).toBe(1) + expect(cache.get('b')).toBe(2) + }) + + it('returns undefined for missing keys', () => { + const cache = new LRUCache(10) + expect(cache.get('missing')).toBeUndefined() + }) + + it('reports correct size', () => { + const cache = new LRUCache(10) + expect(cache.size).toBe(0) + cache.set('a', 1) + expect(cache.size).toBe(1) + cache.set('b', 2) + expect(cache.size).toBe(2) + }) + + it('evicts least recently used when at capacity', () => { + const cache = new LRUCache(3) + cache.set('a', 1) + cache.set('b', 2) + cache.set('c', 3) + // At capacity — adding 'd' should evict 'a' (oldest) + cache.set('d', 4) + expect(cache.size).toBe(3) + expect(cache.get('a')).toBeUndefined() + expect(cache.get('b')).toBe(2) + expect(cache.get('c')).toBe(3) + expect(cache.get('d')).toBe(4) + }) + + it('get() makes entry recently used (not evicted next)', () => { + const cache = new LRUCache(3) + cache.set('a', 1) + cache.set('b', 2) + cache.set('c', 3) + // Access 'a' to make it recently used + cache.get('a') + // Adding 'd' should evict 'b' (now the oldest), not 'a' + cache.set('d', 4) + expect(cache.get('a')).toBe(1) + expect(cache.get('b')).toBeUndefined() + expect(cache.get('c')).toBe(3) + expect(cache.get('d')).toBe(4) + }) + + it('updating an existing key moves it to recently used', () => { + const cache = new LRUCache(3) + cache.set('a', 1) + cache.set('b', 2) + cache.set('c', 3) + // Update 'a' with new value + cache.set('a', 10) + // 
Adding 'd' should evict 'b' (now the oldest) + cache.set('d', 4) + expect(cache.get('a')).toBe(10) + expect(cache.get('b')).toBeUndefined() + }) + + it('has() returns correct presence without affecting order', () => { + const cache = new LRUCache(3) + cache.set('a', 1) + expect(cache.has('a')).toBe(true) + expect(cache.has('b')).toBe(false) + }) + + it('clear() empties the cache', () => { + const cache = new LRUCache(10) + cache.set('a', 1) + cache.set('b', 2) + cache.clear() + expect(cache.size).toBe(0) + expect(cache.get('a')).toBeUndefined() + }) + + it('throws on capacity < 1', () => { + expect(() => new LRUCache(0)).toThrow('capacity must be at least 1') + expect(() => new LRUCache(-5)).toThrow('capacity must be at least 1') + }) + + it('works with capacity of 1', () => { + const cache = new LRUCache(1) + cache.set('a', 1) + expect(cache.get('a')).toBe(1) + cache.set('b', 2) + expect(cache.get('a')).toBeUndefined() + expect(cache.get('b')).toBe(2) + }) + + it('evicts correctly at capacity 1000', () => { + const cache = new LRUCache(1000) + // Fill to capacity + for (let i = 0; i < 1000; i++) { + cache.set(i, i * 10) + } + expect(cache.size).toBe(1000) + // Adding one more should evict key 0 + cache.set(1000, 10000) + expect(cache.size).toBe(1000) + expect(cache.get(0)).toBeUndefined() + expect(cache.get(1)).toBe(10) + expect(cache.get(1000)).toBe(10000) + }) +}) diff --git a/packages/engine/src/__tests__/expressions/interpreter.test.ts b/packages/engine/src/__tests__/expressions/interpreter.test.ts new file mode 100644 index 0000000..85e280d --- /dev/null +++ b/packages/engine/src/__tests__/expressions/interpreter.test.ts @@ -0,0 +1,218 @@ +import { describe, it, expect } from 'vitest' +import { interpretExpression, InterpreterError } from '../../expressions/interpreter.js' +import type { InterpreterContext } from '../../expressions/interpreter.js' + +function makeCtx( + input: unknown = {}, + results: Record = {}, +): InterpreterContext { + return { + input, 
+ results: new Map(Object.entries(results)), + } +} + +describe('interpretExpression', () => { + describe('arithmetic', () => { + it('evaluates 2 + 3 to 5', () => { + const ctx = makeCtx() + expect(interpretExpression('2 + 3', ctx)).toBe(5) + }) + + it('evaluates input.qty * input.price', () => { + const ctx = makeCtx({ qty: 5, price: 10 }) + expect(interpretExpression('input.qty * input.price', ctx)).toBe(50) + }) + + it('evaluates 10 / 3 to floating point', () => { + const ctx = makeCtx() + const result = interpretExpression('10 / 3', ctx) + expect(typeof result).toBe('number') + expect(result).toBeCloseTo(3.3333, 3) + }) + + it('evaluates 10 % 3 to 1', () => { + const ctx = makeCtx() + expect(interpretExpression('10 % 3', ctx)).toBe(1) + }) + + it('division by zero yields Infinity', () => { + const ctx = makeCtx() + expect(interpretExpression('1 / 0', ctx)).toBe(Infinity) + }) + + it('negative division by zero yields -Infinity', () => { + const ctx = makeCtx({ n: -1 }) + expect(interpretExpression('input.n / 0', ctx)).toBe(-Infinity) + }) + + it('preserves operator precedence (2 + 3 * 4 = 14)', () => { + const ctx = makeCtx() + expect(interpretExpression('2 + 3 * 4', ctx)).toBe(14) + }) + + it('evaluates subtraction', () => { + const ctx = makeCtx() + expect(interpretExpression('10 - 3', ctx)).toBe(7) + }) + + it('evaluates string concatenation with +', () => { + const ctx = makeCtx({ name: 'Alice' }) + expect(interpretExpression("'Hello ' + input.name", ctx)).toBe('Hello Alice') + }) + }) + + describe('mixed arithmetic + comparison', () => { + it('evaluates input.qty * input.price > 1000', () => { + const ctx = makeCtx({ qty: 5, price: 300 }) + expect(interpretExpression('input.qty * input.price > 1000', ctx)).toBe(true) + }) + + it('evaluates input.qty * input.price > 1000 (false)', () => { + const ctx = makeCtx({ qty: 2, price: 100 }) + expect(interpretExpression('input.qty * input.price > 1000', ctx)).toBe(false) + }) + + it('evaluates total % 2 === 0 (even 
check)', () => { + const ctx = makeCtx({ total: 42 }) + expect(interpretExpression('input.total % 2 === 0', ctx)).toBe(true) + }) + }) + + describe('comparison operators', () => { + it('evaluates strict equality', () => { + const ctx = makeCtx({ status: 'active' }) + expect(interpretExpression("input.status === 'active'", ctx)).toBe(true) + }) + + it('evaluates strict inequality', () => { + const ctx = makeCtx({ value: 5 }) + expect(interpretExpression('input.value !== 10', ctx)).toBe(true) + }) + + it('evaluates greater than', () => { + const ctx = makeCtx({ score: 85 }) + expect(interpretExpression('input.score > 50', ctx)).toBe(true) + }) + + it('evaluates less than or equal', () => { + const ctx = makeCtx({ value: 100 }) + expect(interpretExpression('input.value <= 100', ctx)).toBe(true) + }) + }) + + describe('logical operators', () => { + it('evaluates logical AND', () => { + const ctx = makeCtx({ a: true, b: false }) + expect(interpretExpression('input.a && input.b', ctx)).toBe(false) + }) + + it('evaluates logical OR', () => { + const ctx = makeCtx({ a: false, b: true }) + expect(interpretExpression('input.a || input.b', ctx)).toBe(true) + }) + }) + + describe('unary operators', () => { + it('evaluates logical NOT', () => { + const ctx = makeCtx({ cancelled: false }) + expect(interpretExpression('!input.cancelled', ctx)).toBe(true) + }) + + it('evaluates typeof', () => { + const ctx = makeCtx({ value: 42 }) + expect(interpretExpression("typeof input.value === 'number'", ctx)).toBe(true) + }) + }) + + describe('ternary / conditional', () => { + it('evaluates conditional expression (truthy)', () => { + const ctx = makeCtx({ vip: true }) + expect(interpretExpression("input.vip ? 'fast' : 'normal'", ctx)).toBe('fast') + }) + + it('evaluates conditional expression (falsy)', () => { + const ctx = makeCtx({ vip: false }) + expect(interpretExpression("input.vip ? 
'fast' : 'normal'", ctx)).toBe('normal') + }) + }) + + describe('template literals', () => { + it('evaluates template literal with interpolation', () => { + const ctx = makeCtx({ status: 'active' }) + expect(interpretExpression('`Status: ${input.status}`', ctx)).toBe('Status: active') + }) + }) + + describe('method calls', () => { + it('evaluates includes()', () => { + const ctx = makeCtx({ name: 'hello world' }) + expect(interpretExpression("input.name.includes('world')", ctx)).toBe(true) + }) + + it('evaluates startsWith()', () => { + const ctx = makeCtx({ name: 'hello' }) + expect(interpretExpression("input.name.startsWith('hel')", ctx)).toBe(true) + }) + + it('evaluates trim()', () => { + const ctx = makeCtx({ value: ' spaced ' }) + expect(interpretExpression('input.value.trim()', ctx)).toBe('spaced') + }) + }) + + describe('Math functions', () => { + it('evaluates Math.abs()', () => { + const ctx = makeCtx({ diff: -42 }) + expect(interpretExpression('Math.abs(input.diff)', ctx)).toBe(42) + }) + + it('evaluates Math.max()', () => { + const ctx = makeCtx({ a: 3, b: 7 }) + expect(interpretExpression('Math.max(input.a, input.b)', ctx)).toBe(7) + }) + + it('evaluates Math.floor()', () => { + const ctx = makeCtx() + expect(interpretExpression('Math.floor(3.7)', ctx)).toBe(3) + }) + + it('accesses Math.PI', () => { + const ctx = makeCtx() + expect(interpretExpression('Math.PI', ctx)).toBeCloseTo(Math.PI, 10) + }) + }) + + describe('node results', () => { + it('accesses previous node result', () => { + const ctx = makeCtx({}, { validate_order: { isValid: true, total: 150 } }) + expect(interpretExpression('validate_order.isValid', ctx)).toBe(true) + expect(interpretExpression('validate_order.total', ctx)).toBe(150) + }) + + it('uses node results in arithmetic', () => { + const ctx = makeCtx( + { taxRate: 0.1 }, + { calc: { subtotal: 100 } }, + ) + expect(interpretExpression('calc.subtotal * input.taxRate', ctx)).toBe(10) + }) + }) + + describe('error handling', () => { 
+ it('throws InterpreterError for unknown identifier', () => { + const ctx = makeCtx() + expect(() => interpretExpression('unknown.value', ctx)).toThrow(InterpreterError) + }) + + it('throws on property access on null', () => { + const ctx = makeCtx({ value: null }) + expect(() => interpretExpression('input.value.x', ctx)).toThrow(InterpreterError) + }) + + it('throws on disallowed method', () => { + const ctx = makeCtx({ items: [1, 2] }) + expect(() => interpretExpression('input.items.forEach()', ctx)).toThrow() + }) + }) +}) diff --git a/packages/engine/src/__tests__/expressions/parse-cache.test.ts b/packages/engine/src/__tests__/expressions/parse-cache.test.ts new file mode 100644 index 0000000..ec5b839 --- /dev/null +++ b/packages/engine/src/__tests__/expressions/parse-cache.test.ts @@ -0,0 +1,56 @@ +import { describe, it, expect, beforeEach } from 'vitest' +import { parseExpression, clearParseCache } from '../../expressions/parser.js' + +describe('expression parse cache', () => { + beforeEach(() => { + clearParseCache() + }) + + it('returns identical result objects for the same expression', () => { + const result1 = parseExpression('input.x > 0') + const result2 = parseExpression('input.x > 0') + // Cached results are the same object reference + expect(result1).toBe(result2) + }) + + it('caches successful parse results', () => { + const result1 = parseExpression("input.priority === 'rush'") + expect(result1.success).toBe(true) + const result2 = parseExpression("input.priority === 'rush'") + expect(result2).toBe(result1) + }) + + it('caches failed parse results', () => { + const result1 = parseExpression('function foo() {}') + expect(result1.success).toBe(false) + const result2 = parseExpression('function foo() {}') + expect(result2).toBe(result1) + }) + + it('caches empty expression errors', () => { + const result1 = parseExpression('') + expect(result1.success).toBe(false) + const result2 = parseExpression('') + expect(result2).toBe(result1) + }) + + 
it('clearParseCache() resets the cache', () => { + const result1 = parseExpression('input.x > 0') + clearParseCache() + const result2 = parseExpression('input.x > 0') + // After clearing, result is a new object (not the same reference) + expect(result2).not.toBe(result1) + // But structurally equivalent + expect(result2).toEqual(result1) + }) + + it('different expressions have different cache entries', () => { + const result1 = parseExpression('input.a > 0') + const result2 = parseExpression('input.b > 0') + expect(result1).not.toBe(result2) + if (result1.success && result2.success) { + expect(result1.expression.memberPaths).toContain('input.a') + expect(result2.expression.memberPaths).toContain('input.b') + } + }) +}) diff --git a/packages/engine/src/__tests__/expressions/parser-security.test.ts b/packages/engine/src/__tests__/expressions/parser-security.test.ts index 6c23ffe..a2bd97d 100644 --- a/packages/engine/src/__tests__/expressions/parser-security.test.ts +++ b/packages/engine/src/__tests__/expressions/parser-security.test.ts @@ -2,15 +2,11 @@ import { describe, it, expect } from 'vitest' import { parseExpression } from '../../expressions/parser.js' describe('parser security', () => { - it('rejects arithmetic operators', () => { - // ALLOWED_BINARY_OPS only includes ===, !==, >, <, >=, <= - // Arithmetic operators (+, -, *, /, %) are NOT in the allowlist + it('allows arithmetic operators', () => { + // Arithmetic operators (+, -, *, /, %) are in the allowlist for (const op of ['+', '-', '*', '/', '%']) { const result = parseExpression(`input.a ${op} input.b`) - expect(result.success).toBe(false) - if (!result.success) { - expect(result.errors[0]?.message).toContain('Disallowed binary operator') - } + expect(result.success).toBe(true) } }) diff --git a/packages/engine/src/__tests__/expressions/parser.test.ts b/packages/engine/src/__tests__/expressions/parser.test.ts index e5fbd69..b8e151b 100644 --- a/packages/engine/src/__tests__/expressions/parser.test.ts 
+++ b/packages/engine/src/__tests__/expressions/parser.test.ts @@ -91,6 +91,55 @@ describe('parseExpression', () => { }) }) + describe('arithmetic expressions', () => { + it('parses simple addition', () => { + const result = parseExpression('2 + 3') + expect(result.success).toBe(true) + }) + + it('parses member access with multiplication', () => { + const result = parseExpression('input.qty * input.price') + expect(result.success).toBe(true) + if (result.success) { + expect(result.expression.identifiers).toContain('input') + expect(result.expression.memberPaths).toContain('input.qty') + expect(result.expression.memberPaths).toContain('input.price') + } + }) + + it('parses modulo operator', () => { + const result = parseExpression('total % 100') + expect(result.success).toBe(true) + if (result.success) { + expect(result.expression.identifiers).toContain('total') + } + }) + + it('parses subtraction', () => { + const result = parseExpression('input.a - input.b') + expect(result.success).toBe(true) + }) + + it('parses division', () => { + const result = parseExpression('input.total / input.count') + expect(result.success).toBe(true) + }) + + it('preserves operator precedence (2 + 3 * 4)', () => { + // Acorn parses with correct JS precedence: 2 + (3 * 4) = 14 + const result = parseExpression('2 + 3 * 4') + expect(result.success).toBe(true) + }) + + it('parses mixed arithmetic and comparison', () => { + const result = parseExpression('input.qty * input.price > 1000') + expect(result.success).toBe(true) + if (result.success) { + expect(result.expression.identifiers).toContain('input') + } + }) + }) + describe('invalid expressions', () => { it('rejects assignment', () => { const result = parseExpression('x = 5') diff --git a/packages/engine/src/__tests__/rules/walker-rules.test.ts b/packages/engine/src/__tests__/rules/walker-rules.test.ts index 95adbf5..1326956 100644 --- a/packages/engine/src/__tests__/rules/walker-rules.test.ts +++ 
b/packages/engine/src/__tests__/rules/walker-rules.test.ts @@ -81,7 +81,9 @@ describe('walker with rules', () => { expect(trace.steps).toHaveLength(2) expect(trace.steps[0]?.node_id).toBe('compute') expect(trace.steps[0]?.status).toBe('completed') - expect(trace.output).toEqual({ discount_percent: 20, shipping: 'free' }) + // Output includes node-keyed result and flat-merged fields + expect(trace.output).toMatchObject({ discount_percent: 20, shipping: 'free' }) + expect(trace.output).toHaveProperty('compute', { discount_percent: 20, shipping: 'free' }) expect(mockedLoadRulesFile).toHaveBeenCalledWith('discount.rules.yaml', '/tmp/test') expect(mockedLoadEntryPoint).not.toHaveBeenCalled() }) @@ -171,8 +173,8 @@ describe('walker with rules', () => { expect(trace.status).toBe('success') expect(trace.steps).toHaveLength(3) - // The process node received the discount from compute's rules result - expect(trace.output).toEqual({ applied: { discount: 15 } }) + // Output includes node-keyed results and flat-merged fields + expect(trace.output).toMatchObject({ process: { applied: { discount: 15 } } }) }) it('rejects unknown evaluator plugin', async () => { @@ -347,7 +349,8 @@ describe('walker with rules', () => { const trace = await runGraph(doc, makeOptions()) expect(trace.status).toBe('success') - expect(trace.output).toEqual({ tier: 'gold' }) + // Output includes node-keyed results (route, process) and flat-merged fields + expect(trace.output).toMatchObject({ tier: 'gold' }) }) it('rejects unknown evaluator plugin on switch', async () => { @@ -429,12 +432,7 @@ describe('walker with rules', () => { const trace = await runGraph(doc, makeOptions()) expect(trace.status).toBe('success') - expect(trace.steps.map((s) => s.node_id)).toEqual([ - 'classify', - 'route', - 'urgent', - 'done', - ]) + expect(trace.steps.map((s) => s.node_id)).toEqual(['classify', 'route', 'urgent', 'done']) }) it('handles flow with rules-driven switch and cases-driven switch', async () => { diff --git 
a/packages/engine/src/__tests__/security/path-containment.test.ts b/packages/engine/src/__tests__/security/path-containment.test.ts new file mode 100644 index 0000000..03f8f91 --- /dev/null +++ b/packages/engine/src/__tests__/security/path-containment.test.ts @@ -0,0 +1,54 @@ +import { describe, it, expect } from 'vitest' +import { assertWithinProject } from '../../security/path-containment.js' + +describe('assertWithinProject', () => { + const projectRoot = '/home/user/project' + + it('allows a path within the project root', () => { + expect(() => assertWithinProject('src/main.ts', projectRoot)).not.toThrow() + }) + + it('allows a nested path within the project root', () => { + expect(() => assertWithinProject('src/rules/order.rules.yaml', projectRoot)).not.toThrow() + }) + + it('throws on ../../etc/passwd style traversal', () => { + expect(() => assertWithinProject('../../etc/passwd', projectRoot)).toThrow( + /resolves outside project root/, + ) + }) + + it('throws on absolute path outside project', () => { + expect(() => assertWithinProject('/etc/passwd', projectRoot)).toThrow( + /resolves outside project root/, + ) + }) + + it('error message shows relative path, not absolute', () => { + try { + assertWithinProject('../../etc/passwd', projectRoot) + expect.fail('should have thrown') + } catch (err) { + const message = (err as Error).message + expect(message).toContain('../../etc/passwd') + expect(message).not.toContain(projectRoot) + } + }) + + it('throws on path that starts with projectRoot as prefix but is not a child', () => { + // /home/user/project-extra is not inside /home/user/project + expect(() => assertWithinProject('../project-extra/file', projectRoot)).toThrow( + /resolves outside project root/, + ) + }) + + it('allows the project root itself', () => { + expect(() => assertWithinProject('.', projectRoot)).not.toThrow() + }) + + it('throws on sneaky path with encoded traversal', () => { + expect(() => assertWithinProject('src/../../../etc/shadow', 
projectRoot)).toThrow( + /resolves outside project root/, + ) + }) +}) diff --git a/packages/engine/src/__tests__/security/reserved-node-ids.test.ts b/packages/engine/src/__tests__/security/reserved-node-ids.test.ts new file mode 100644 index 0000000..2bda30d --- /dev/null +++ b/packages/engine/src/__tests__/security/reserved-node-ids.test.ts @@ -0,0 +1,87 @@ +import { describe, it, expect } from 'vitest' +import { validate } from '@ruminaider/flowprint-schema' + +function makeDoc(nodeId: string) { + return { + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + [nodeId]: { type: 'action', lane: 'main', label: 'Step', next: 'end' }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + } +} + +describe('reserved node IDs', () => { + it('rejects node ID "input"', () => { + const result = validate(makeDoc('input')) + expect(result.valid).toBe(false) + expect( + result.errors.some( + (e) => e.path === '/nodes/input' && e.message.includes('reserved'), + ), + ).toBe(true) + }) + + it('rejects node ID "state"', () => { + const result = validate(makeDoc('state')) + expect(result.valid).toBe(false) + expect( + result.errors.some( + (e) => e.path === '/nodes/state' && e.message.includes('reserved'), + ), + ).toBe(true) + }) + + it('rejects node ID "Math"', () => { + const result = validate(makeDoc('Math')) + expect(result.valid).toBe(false) + expect( + result.errors.some( + (e) => e.path === '/nodes/Math' && e.message.includes('reserved'), + ), + ).toBe(true) + }) + + it('rejects node ID "node"', () => { + const result = validate(makeDoc('node')) + expect(result.valid).toBe(false) + expect( + result.errors.some( + (e) => e.path === '/nodes/node' && e.message.includes('reserved'), + ), + ).toBe(true) + }) + + it('rejects node ID "output"', () => { + const result = validate(makeDoc('output')) + expect(result.valid).toBe(false) + expect( + 
result.errors.some( + (e) => e.path === '/nodes/output' && e.message.includes('reserved'), + ), + ).toBe(true) + }) + + it('allows non-reserved node ID "my_action"', () => { + const result = validate(makeDoc('my_action')) + expect(result.valid).toBe(true) + }) + + it('allows non-reserved node ID "process_input"', () => { + const result = validate(makeDoc('process_input')) + expect(result.valid).toBe(true) + }) + + it('error message lists all reserved IDs', () => { + const result = validate(makeDoc('input')) + const error = result.errors.find((e) => e.path === '/nodes/input') + expect(error?.message).toContain('input') + expect(error?.message).toContain('state') + expect(error?.message).toContain('Math') + expect(error?.message).toContain('node') + expect(error?.message).toContain('output') + }) +}) diff --git a/packages/engine/src/__tests__/security/yaml-limits.test.ts b/packages/engine/src/__tests__/security/yaml-limits.test.ts new file mode 100644 index 0000000..9ed849f --- /dev/null +++ b/packages/engine/src/__tests__/security/yaml-limits.test.ts @@ -0,0 +1,56 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest' +import { parse } from 'yaml' + +describe('YAML parsing limits', () => { + it('rejects YAML with excessive aliases (YAML bomb protection)', () => { + // Build a YAML bomb: define an anchor and reference it 101+ times + const lines = ['top: &a value'] + for (let i = 0; i < 101; i++) { + lines.push(`k${String(i)}: *a`) + } + const yaml = lines.join('\n') + + // With maxAliasCount: 100, parsing should throw + expect(() => parse(yaml, { maxAliasCount: 100, schema: 'core' })).toThrow( + /excessive alias count/i, + ) + }) + + it('accepts YAML with aliases within the limit', () => { + const lines = ['top: &a value'] + for (let i = 0; i < 50; i++) { + lines.push(`k${String(i)}: *a`) + } + const yaml = lines.join('\n') + + // With maxAliasCount: 100, parsing 50 aliases should succeed + const result = parse(yaml, { maxAliasCount: 100, schema: 'core' 
}) + expect(result).toBeDefined() + expect(result.top).toBe('value') + expect(result.k0).toBe('value') + }) + + it('verifies loadRulesFile rejects YAML bombs', async () => { + // Dynamically import to test the actual code path with mocked fs + vi.mock('node:fs', () => ({ + readFileSync: vi.fn(), + })) + + const { readFileSync } = await import('node:fs') + const mockedReadFileSync = vi.mocked(readFileSync) + const { loadRulesFile } = await import('../../rules/evaluator.js') + + // Build a YAML bomb with 101 aliases + const lines = ['schema: flowprint-rules/1.0', 'name: bomb', 'hit_policy: first', 'x: &a val'] + for (let i = 0; i < 101; i++) { + lines.push(`k${String(i)}: *a`) + } + lines.push('rules:', ' - then:', ' result: true') + + mockedReadFileSync.mockReturnValue(lines.join('\n')) + + expect(() => loadRulesFile('bomb.yaml', '/project')).toThrow(/parse/i) + + vi.restoreAllMocks() + }) +}) diff --git a/packages/engine/src/__tests__/walker/walk.test.ts b/packages/engine/src/__tests__/walker/walk.test.ts new file mode 100644 index 0000000..b113dbb --- /dev/null +++ b/packages/engine/src/__tests__/walker/walk.test.ts @@ -0,0 +1,599 @@ +import { describe, it, expect, vi } from 'vitest' +import { walkGraph } from '../../walker/walk.js' +import type { WalkGraphCallbacks } from '../../walker/walk.js' +import type { ExecutionContext } from '../../walker/types.js' +import type { StepResult } from '../../runner/types.js' +import type { FlowprintDocument } from '@ruminaider/flowprint-schema' + +function makeDoc(nodes: FlowprintDocument['nodes']): FlowprintDocument { + return { + schema: 'flowprint/1.0', + name: 'test-flow', + version: '1.0.0', + lanes: { + default: { label: 'Default', visibility: 'internal', order: 0 }, + }, + nodes, + } +} + +/** + * Build minimal mock callbacks that record step data. + * All handlers return simple outputs that can be flat-merged into state. 
+ */ +function makeMockCallbacks( + overrides: Partial> = {}, +): WalkGraphCallbacks { + return { + onAction: vi.fn(async (_nodeId, _node, _ctx) => ({})), + onSwitch: vi.fn(async (_nodeId, _node, _ctx) => undefined), + onParallel: vi.fn(async (_nodeId, _node, _ctx) => ({})), + onWait: vi.fn(async (_nodeId, _node, _ctx) => ({})), + onError: vi.fn(async (_nodeId, _node, _ctx) => undefined), + onTrigger: vi.fn(async (_nodeId, _node, _ctx) => undefined), + onTerminal: vi.fn(async (_nodeId, _node, _ctx) => {}), + onStep: vi.fn(), + ...overrides, + } +} + +describe('walkGraph', () => { + describe('traversal order', () => { + it('follows next pointers through a simple linear flow', async () => { + const doc = makeDoc({ + step1: { + type: 'action', + lane: 'default', + label: 'Step 1', + entry_points: [], + next: 'step2', + }, + step2: { + type: 'action', + lane: 'default', + label: 'Step 2', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const visitOrder: string[] = [] + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + return { [`${nodeId}_output`]: true } + }), + onTerminal: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + }), + }) + + const result = await walkGraph(doc, {}, callbacks) + + expect(visitOrder).toEqual(['step1', 'step2', 'done']) + expect(result.outcome).toBe('success') + }) + }) + + describe('flat merge behavior', () => { + it('merges node outputs into accumulated state', async () => { + const doc = makeDoc({ + node_a: { + type: 'action', + lane: 'default', + label: 'A', + entry_points: [], + next: 'node_b', + }, + node_b: { + type: 'action', + lane: 'default', + label: 'B', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (nodeId) => { + if (nodeId 
=== 'node_a') return { x: 1 } + if (nodeId === 'node_b') return { y: 2 } + return {} + }), + }) + + const result = await walkGraph(doc, {}, callbacks) + + expect(result.output).toMatchObject({ x: 1, y: 2 }) + }) + + it('last writer wins on flat merge conflict', async () => { + const doc = makeDoc({ + node_a: { + type: 'action', + lane: 'default', + label: 'A', + entry_points: [], + next: 'node_b', + }, + node_b: { + type: 'action', + lane: 'default', + label: 'B', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (nodeId) => { + if (nodeId === 'node_a') return { x: 1 } + if (nodeId === 'node_b') return { x: 2 } + return {} + }), + }) + + const result = await walkGraph(doc, {}, callbacks) + + expect(result.output.x).toBe(2) // last writer wins + }) + }) + + describe('AbortSignal', () => { + it('terminates immediately with pre-aborted signal', async () => { + const doc = makeDoc({ + step1: { + type: 'action', + lane: 'default', + label: 'Step 1', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const controller = new AbortController() + controller.abort() + + const callbacks = makeMockCallbacks() + + const result = await walkGraph(doc, {}, callbacks, { + abortController: controller, + }) + + // Should not have visited any nodes + expect(callbacks.onAction).not.toHaveBeenCalled() + expect(callbacks.onTerminal).not.toHaveBeenCalled() + expect(result.outcome).toBeUndefined() + }) + }) + + describe('switch traversal', () => { + it('follows the branch returned by onSwitch', async () => { + const doc = makeDoc({ + decide: { + type: 'switch', + lane: 'default', + label: 'Decide', + cases: [ + { when: 'true', next: 'path_a' }, + { when: 'false', next: 'path_b' }, + ], + default: 'path_b', + }, + path_a: { + type: 'terminal', + 
lane: 'default', + label: 'Path A', + outcome: 'success', + }, + path_b: { + type: 'terminal', + lane: 'default', + label: 'Path B', + outcome: 'failure', + }, + }) + + const callbacks = makeMockCallbacks({ + onSwitch: vi.fn(async () => 'path_a'), + }) + + const result = await walkGraph(doc, {}, callbacks) + + expect(result.outcome).toBe('success') + expect(callbacks.onTerminal).toHaveBeenCalledWith( + 'path_a', + expect.objectContaining({ type: 'terminal', outcome: 'success' }), + expect.any(Object), + ) + }) + }) + + describe('terminal handling', () => { + it('stops at terminal node and returns outcome', async () => { + const doc = makeDoc({ + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'failure', + }, + }) + + const callbacks = makeMockCallbacks() + + const result = await walkGraph(doc, {}, callbacks) + + expect(result.outcome).toBe('failure') + expect(callbacks.onTerminal).toHaveBeenCalledTimes(1) + }) + }) + + describe('trigger -> action -> terminal', () => { + it('handles a basic 3-node flow starting with trigger', async () => { + const doc = makeDoc({ + start: { + type: 'trigger', + lane: 'default', + label: 'Start', + trigger_type: 'manual', + next: 'process', + }, + process: { + type: 'action', + lane: 'default', + label: 'Process', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const visitOrder: string[] = [] + const callbacks = makeMockCallbacks({ + onTrigger: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + return undefined // let walker use node.next + }), + onAction: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + return { processed: true } + }), + onTerminal: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + }), + }) + + const result = await walkGraph(doc, { initial: 'data' }, callbacks) + + expect(visitOrder).toEqual(['start', 'process', 'done']) + expect(result.outcome).toBe('success') + 
expect(result.output).toMatchObject({ processed: true }) + }) + }) + + describe('context management', () => { + it('passes input to ExecutionContext', async () => { + const doc = makeDoc({ + step: { + type: 'action', + lane: 'default', + label: 'Step', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + let capturedCtx: ExecutionContext | undefined + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (_nodeId, _node, ctx) => { + capturedCtx = ctx + return {} + }), + }) + + await walkGraph(doc, { user: 'alice' }, callbacks) + + expect(capturedCtx?.input).toEqual({ user: 'alice' }) + expect(capturedCtx?.node.id).toBe('step') + expect(capturedCtx?.node.type).toBe('action') + expect(capturedCtx?.node.lane).toBe('default') + }) + + it('accumulated state is visible to subsequent callbacks', async () => { + const doc = makeDoc({ + first: { + type: 'action', + lane: 'default', + label: 'First', + entry_points: [], + next: 'second', + }, + second: { + type: 'action', + lane: 'default', + label: 'Second', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + let secondCtxState: Record | undefined + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (nodeId, _node, ctx) => { + if (nodeId === 'first') return { fromFirst: 42 } + if (nodeId === 'second') { + secondCtxState = { ...ctx.state } + return {} + } + return {} + }), + }) + + await walkGraph(doc, {}, callbacks) + + expect(secondCtxState).toMatchObject({ fromFirst: 42 }) + }) + }) + + describe('error handling', () => { + it('routes action error to error node when error.catch is defined', async () => { + const doc = makeDoc({ + risky: { + type: 'action', + lane: 'default', + label: 'Risky', + entry_points: [], + next: 'done', + error: { catch: 'handle_error' }, + }, + handle_error: { + type: 'error', + lane: 'default', + 
label: 'Handle Error', + next: 'failed', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + failed: { + type: 'terminal', + lane: 'default', + label: 'Failed', + outcome: 'failure', + }, + }) + + const visitOrder: string[] = [] + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + throw new Error('action failed') + }), + onError: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + return 'failed' + }), + onTerminal: vi.fn(async (nodeId) => { + visitOrder.push(nodeId) + }), + }) + + const result = await walkGraph(doc, {}, callbacks) + + expect(visitOrder).toEqual(['risky', 'handle_error', 'failed']) + expect(result.outcome).toBe('failure') + }) + + it('throws when action fails without error.catch', async () => { + const doc = makeDoc({ + risky: { + type: 'action', + lane: 'default', + label: 'Risky', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async () => { + throw new Error('unhandled failure') + }), + }) + + await expect(walkGraph(doc, {}, callbacks)).rejects.toThrow('unhandled failure') + }) + }) + + describe('compensation', () => { + it('runs compensation in LIFO order on error', async () => { + const doc = makeDoc({ + action1: { + type: 'action', + lane: 'default', + label: 'Action 1', + entry_points: [], + compensation: { file: 'comp1.ts', symbol: 'undo1' }, + next: 'action2', + }, + action2: { + type: 'action', + lane: 'default', + label: 'Action 2', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const compensationOrder: string[] = [] + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (nodeId) => { + if (nodeId === 'action1') return { created: true } + throw new Error('action2 failed') + }), + 
onCompensation: vi.fn((_nodeId, _comp, _result) => { + return async () => { + compensationOrder.push('comp_action1') + } + }), + onCompensationStep: vi.fn(), + }) + + await expect(walkGraph(doc, {}, callbacks)).rejects.toThrow('action2 failed') + expect(compensationOrder).toEqual(['comp_action1']) + expect(callbacks.onCompensationStep).toHaveBeenCalledWith('action1') + }) + }) + + describe('no root nodes', () => { + it('throws when no root nodes found', async () => { + const doc = makeDoc({ + a: { + type: 'action', + lane: 'default', + label: 'A', + entry_points: [], + next: 'b', + }, + b: { + type: 'action', + lane: 'default', + label: 'B', + entry_points: [], + next: 'a', + }, + }) + + const callbacks = makeMockCallbacks() + + await expect(walkGraph(doc, {}, callbacks)).rejects.toThrow( + 'No root nodes found in the document', + ) + }) + }) + + describe('node not found', () => { + it('throws when next points to nonexistent node', async () => { + const doc = makeDoc({ + step: { + type: 'action', + lane: 'default', + label: 'Step', + entry_points: [], + next: 'nonexistent', + }, + }) + + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async () => ({})), + }) + + await expect(walkGraph(doc, {}, callbacks)).rejects.toThrow( + 'Node "nonexistent" not found in document', + ) + }) + }) + + describe('onStep trace collection', () => { + it('collects steps from onStep calls into WalkResult.trace', async () => { + const doc = makeDoc({ + step: { + type: 'action', + lane: 'default', + label: 'Step', + entry_points: [], + next: 'done', + }, + done: { + type: 'terminal', + lane: 'default', + label: 'Done', + outcome: 'success', + }, + }) + + const callbacks = makeMockCallbacks({ + onAction: vi.fn(async (nodeId, _node, _ctx) => { + // Simulate the adapter calling onStep + callbacks.onStep({ + node_id: nodeId, + type: 'action', + status: 'completed', + }) + return {} + }), + onTerminal: vi.fn(async (nodeId) => { + callbacks.onStep({ + node_id: nodeId, + type: 'terminal', 
+            status: 'reached',
+            outcome: 'success',
+          })
+        }),
+      })
+
+      const result = await walkGraph(doc, {}, callbacks)
+
+      expect(result.trace).toHaveLength(2)
+      expect(result.trace[0]?.node_id).toBe('step')
+      expect(result.trace[1]?.node_id).toBe('done')
+    })
+  })
+})
diff --git a/packages/engine/src/expressions/allowlist.ts b/packages/engine/src/expressions/allowlist.ts
index 4549b9e..66d1b05 100644
--- a/packages/engine/src/expressions/allowlist.ts
+++ b/packages/engine/src/expressions/allowlist.ts
@@ -11,7 +11,19 @@ export const ALLOWED_AST_TYPES = new Set([
   'Literal',
 ])
 
-export const ALLOWED_BINARY_OPS = new Set(['===', '!==', '>', '<', '>=', '<='])
+export const ALLOWED_BINARY_OPS = new Set([
+  '===',
+  '!==',
+  '>',
+  '<',
+  '>=',
+  '<=',
+  '+',
+  '-',
+  '*',
+  '/',
+  '%',
+])
 
 export const ALLOWED_LOGICAL_OPS = new Set(['&&', '||'])
 export const ALLOWED_UNARY_OPS = new Set(['!', 'typeof'])
diff --git a/packages/engine/src/expressions/cache.ts b/packages/engine/src/expressions/cache.ts
new file mode 100644
index 0000000..4c5cbf7
--- /dev/null
+++ b/packages/engine/src/expressions/cache.ts
@@ -0,0 +1,55 @@
+/**
+ * Minimal LRU (Least Recently Used) cache.
+ *
+ * Uses a Map's insertion-order iteration to track recency.
+ * On get: deletes and re-inserts the entry to move it to the end (most recent).
+ * On set at capacity: deletes the first entry (least recently used).
+ *
+ * Zero external dependencies.
+ */
+export class LRUCache<K, V> {
+  private readonly map = new Map<K, V>()
+  readonly capacity: number
+
+  constructor(capacity: number) {
+    if (capacity < 1) {
+      throw new Error('LRU cache capacity must be at least 1')
+    }
+    this.capacity = capacity
+  }
+
+  get(key: K): V | undefined {
+    if (!this.map.has(key)) {
+      return undefined
+    }
+    // Move to end (most recently used) by delete + re-insert
+    const value = this.map.get(key)!
+ this.map.delete(key) + this.map.set(key, value) + return value + } + + set(key: K, value: V): void { + if (this.map.has(key)) { + // Update existing: delete + re-insert to move to end + this.map.delete(key) + } else if (this.map.size >= this.capacity) { + // Evict least recently used (first entry in Map iteration order) + const oldest = this.map.keys().next().value as K + this.map.delete(oldest) + } + this.map.set(key, value) + } + + has(key: K): boolean { + return this.map.has(key) + } + + get size(): number { + return this.map.size + } + + clear(): void { + this.map.clear() + } +} diff --git a/packages/engine/src/expressions/index.ts b/packages/engine/src/expressions/index.ts index 61348dd..1af244a 100644 --- a/packages/engine/src/expressions/index.ts +++ b/packages/engine/src/expressions/index.ts @@ -1,5 +1,8 @@ -export { parseExpression } from './parser.js' +export { parseExpression, clearParseCache } from './parser.js' export { validateExpressions } from './validator.js' +export { interpretExpression, InterpreterError } from './interpreter.js' +export type { InterpreterContext } from './interpreter.js' +export { LRUCache } from './cache.js' export type { ParseResult, ExpressionError, ParsedExpression } from './types.js' export type { ExpressionValidationResult, ExpressionValidationError } from './validator.js' export { diff --git a/packages/engine/src/expressions/interpreter.ts b/packages/engine/src/expressions/interpreter.ts new file mode 100644 index 0000000..ad8f673 --- /dev/null +++ b/packages/engine/src/expressions/interpreter.ts @@ -0,0 +1,229 @@ +/** + * Browser-safe AST interpreter for flowprint expressions. + * + * Walks the acorn AST and evaluates expressions without `node:vm`. + * Only supports the allowlisted AST types and operators. 
+ */
+import * as acorn from 'acorn'
+import {
+  ALLOWED_BINARY_OPS,
+  ALLOWED_LOGICAL_OPS,
+  ALLOWED_UNARY_OPS,
+  ALLOWED_METHODS,
+  ALLOWED_MATH_MEMBERS,
+} from './allowlist.js'
+
+/* eslint-disable @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-call, @typescript-eslint/no-unsafe-return, @typescript-eslint/no-explicit-any, @typescript-eslint/no-non-null-assertion */
+
+export class InterpreterError extends Error {
+  constructor(message: string) {
+    super(message)
+    this.name = 'InterpreterError'
+  }
+}
+
+/**
+ * Build a frozen Math object with only allowlisted methods/constants.
+ */
+function buildSafeMath(): Readonly<Record<string, unknown>> {
+  const safeMath: Record<string, unknown> = {}
+  for (const key of ALLOWED_MATH_MEMBERS) {
+    safeMath[key] = Math[key as keyof typeof Math]
+  }
+  return Object.freeze(safeMath)
+}
+
+const SAFE_MATH = buildSafeMath()
+
+export interface InterpreterContext {
+  /** Workflow input */
+  input: unknown
+  /** Previous node results keyed by node ID */
+  results: Map<string, unknown>
+}
+
+/**
+ * Interpret a flowprint expression by parsing and walking the AST.
+ *
+ * Browser-safe: no `node:vm` dependency. Uses only the allowlisted
+ * operators and methods from the expression allowlist.
+ * + * @param source - The expression source string + * @param context - Execution context with input and node results + * @returns The evaluated result + */ +export function interpretExpression(source: string, context: InterpreterContext): unknown { + const ast = acorn.parseExpressionAt(source, 0, { ecmaVersion: 2022 }) + return evaluate(ast as any, context) +} + +function evaluate(node: any, ctx: InterpreterContext): unknown { + switch (node.type as string) { + case 'Literal': + return node.value + + case 'TemplateLiteral': + return evaluateTemplateLiteral(node, ctx) + + case 'Identifier': + return resolveIdentifier(node.name as string, ctx) + + case 'MemberExpression': + return evaluateMemberExpression(node, ctx) + + case 'BinaryExpression': + return evaluateBinaryExpression(node, ctx) + + case 'LogicalExpression': + return evaluateLogicalExpression(node, ctx) + + case 'UnaryExpression': + return evaluateUnaryExpression(node, ctx) + + case 'ConditionalExpression': + return evaluate(node.test, ctx) ? 
evaluate(node.consequent, ctx) : evaluate(node.alternate, ctx) + + case 'CallExpression': + return evaluateCallExpression(node, ctx) + + default: + throw new InterpreterError(`Unsupported AST node type: ${node.type as string}`) + } +} + +function resolveIdentifier(name: string, ctx: InterpreterContext): unknown { + if (name === 'input') return ctx.input + if (name === 'Math') return SAFE_MATH + if (ctx.results.has(name)) return ctx.results.get(name) + throw new InterpreterError(`Unknown identifier: ${name}`) +} + +function evaluateMemberExpression(node: any, ctx: InterpreterContext): unknown { + const object = evaluate(node.object, ctx) + + let property: string + if (node.computed) { + property = String(evaluate(node.property, ctx)) + } else { + property = node.property.name as string + } + + if (object === null || object === undefined) { + throw new InterpreterError(`Cannot read property '${property}' of ${String(object)}`) + } + + return (object as any)[property] +} + +function evaluateBinaryExpression(node: any, ctx: InterpreterContext): unknown { + const op = node.operator as string + if (!ALLOWED_BINARY_OPS.has(op)) { + throw new InterpreterError(`Disallowed binary operator: ${op}`) + } + + const left = evaluate(node.left, ctx) as any + const right = evaluate(node.right, ctx) as any + + switch (op) { + case '===': + return left === right + case '!==': + return left !== right + case '>': + return left > right + case '<': + return left < right + case '>=': + return left >= right + case '<=': + return left <= right + case '+': + return left + right + case '-': + return left - right + case '*': + return left * right + case '/': + return left / right + case '%': + return left % right + default: + throw new InterpreterError(`Unhandled binary operator: ${op}`) + } +} + +function evaluateLogicalExpression(node: any, ctx: InterpreterContext): unknown { + const op = node.operator as string + if (!ALLOWED_LOGICAL_OPS.has(op)) { + throw new InterpreterError(`Disallowed 
logical operator: ${op}`) + } + + if (op === '&&') { + return evaluate(node.left, ctx) && evaluate(node.right, ctx) + } + // op === '||' + return evaluate(node.left, ctx) || evaluate(node.right, ctx) +} + +function evaluateUnaryExpression(node: any, ctx: InterpreterContext): unknown { + const op = node.operator as string + if (!ALLOWED_UNARY_OPS.has(op)) { + throw new InterpreterError(`Disallowed unary operator: ${op}`) + } + + const arg = evaluate(node.argument, ctx) + + if (op === '!') return !arg + if (op === 'typeof') return typeof arg + throw new InterpreterError(`Unhandled unary operator: ${op}`) +} + +function evaluateTemplateLiteral(node: any, ctx: InterpreterContext): string { + const quasis: string[] = (node.quasis as any[]).map((q: any) => q.value.cooked as string) + const expressions: unknown[] = (node.expressions as any[]).map((e: any) => evaluate(e, ctx)) + + let result = quasis[0]! + for (let i = 0; i < expressions.length; i++) { + result += String(expressions[i]) + result += quasis[i + 1]! + } + return result +} + +function evaluateCallExpression(node: any, ctx: InterpreterContext): unknown { + const callee = node.callee + if (callee?.type !== 'MemberExpression') { + throw new InterpreterError('Only method calls on objects are supported') + } + + const object = evaluate(callee.object, ctx) + const method = callee.computed + ? 
String(evaluate(callee.property, ctx))
+    : (callee.property.name as string)
+
+  // Math.fn() calls
+  if (object === SAFE_MATH) {
+    if (!ALLOWED_MATH_MEMBERS.has(method)) {
+      throw new InterpreterError(`Disallowed Math member: Math.${method}`)
+    }
+    const fn = (SAFE_MATH as any)[method]
+    if (typeof fn !== 'function') {
+      throw new InterpreterError(`Math.${method} is not a function`)
+    }
+    const args = (node.arguments as any[]).map((a: any) => evaluate(a, ctx))
+    return fn(...args)
+  }
+
+  // obj.method() calls
+  if (!ALLOWED_METHODS.has(method)) {
+    throw new InterpreterError(`Disallowed method call: ${method}`)
+  }
+
+  const fn = (object as any)[method]
+  if (typeof fn !== 'function') {
+    throw new InterpreterError(`${method} is not a function`)
+  }
+
+  const args = (node.arguments as any[]).map((a: any) => evaluate(a, ctx))
+  return fn.call(object, ...args)
+}
diff --git a/packages/engine/src/expressions/parser.ts b/packages/engine/src/expressions/parser.ts
index c049c8f..5b1fad1 100644
--- a/packages/engine/src/expressions/parser.ts
+++ b/packages/engine/src/expressions/parser.ts
@@ -8,14 +8,30 @@ import {
   ALLOWED_METHODS,
   ALLOWED_MATH_MEMBERS,
 } from './allowlist.js'
+import { LRUCache } from './cache.js'
 
 /* eslint-disable @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-explicit-any, @typescript-eslint/no-non-null-assertion */
 
 const FORBIDDEN_IDENTIFIERS = new Set(['Date', 'this', 'globalThis', 'window', 'self', 'process'])
 
+const PARSE_CACHE = new LRUCache<string, ParseResult>(1000)
+
+/**
+ * Clear the expression parse cache.
+ * Useful for testing or when allowlist changes at runtime.
+ */ +export function clearParseCache(): void { + PARSE_CACHE.clear() +} + export function parseExpression(source: string): ParseResult { + const cached = PARSE_CACHE.get(source) + if (cached) return cached + if (source.trim().length === 0) { - return { success: false, errors: [{ message: 'Expression is empty' }] } + const result: ParseResult = { success: false, errors: [{ message: 'Expression is empty' }] } + PARSE_CACHE.set(source, result) + return result } let ast: acorn.Node @@ -24,15 +40,19 @@ export function parseExpression(source: string): ParseResult { } catch (e: unknown) { const msg = e instanceof Error ? e.message : 'Parse error' const pos = e instanceof SyntaxError ? ((e as any).pos as number | undefined) : undefined - return { success: false, errors: [{ message: msg, position: pos }] } + const result: ParseResult = { success: false, errors: [{ message: msg, position: pos }] } + PARSE_CACHE.set(source, result) + return result } // Ensure the entire source was consumed (no trailing content except whitespace) if (ast.end < source.trimEnd().length) { - return { + const result: ParseResult = { success: false, errors: [{ message: 'Unexpected content after expression', position: ast.end }], } + PARSE_CACHE.set(source, result) + return result } const errors: ExpressionError[] = [] @@ -230,10 +250,12 @@ export function parseExpression(source: string): ParseResult { walk(ast, false) if (errors.length > 0) { - return { success: false, errors } + const result: ParseResult = { success: false, errors } + PARSE_CACHE.set(source, result) + return result } - return { + const result: ParseResult = { success: true, expression: { source, @@ -241,6 +263,8 @@ export function parseExpression(source: string): ParseResult { memberPaths: [...memberPaths], }, } + PARSE_CACHE.set(source, result) + return result } /** diff --git a/packages/engine/src/index.ts b/packages/engine/src/index.ts index 0f216c9..39062d0 100644 --- a/packages/engine/src/index.ts +++ 
b/packages/engine/src/index.ts @@ -1,11 +1,19 @@ // Expressions -export { parseExpression, validateExpressions } from './expressions/index.js' +export { + parseExpression, + clearParseCache, + validateExpressions, + interpretExpression, + InterpreterError, + LRUCache, +} from './expressions/index.js' export type { ParseResult, ExpressionError, ParsedExpression, ExpressionValidationResult, ExpressionValidationError, + InterpreterContext, } from './expressions/index.js' // Runner @@ -34,15 +42,21 @@ export type { RulesTestResult, } from './rules/index.js' -// Walker (new execution types — will replace runner types in PR 1b) +// Walker (generic graph walker + types) +export { walkGraph } from './walker/index.js' export type { ExecutionContext as WalkerExecutionContext, NodeExecutionRecord, WalkerCallbacks, WalkOptions, WalkResult, + WalkGraphCallbacks, + CompensationEntry, } from './walker/index.js' +// Security +export { assertWithinProject } from './security/index.js' + // Codegen export { generateCode } from './codegen/index.js' export type { GenerateResult, GenerateOptions, GeneratedFile } from './codegen/index.js' diff --git a/packages/engine/src/rules/evaluator.ts b/packages/engine/src/rules/evaluator.ts index 3b68a5f..cc54892 100644 --- a/packages/engine/src/rules/evaluator.ts +++ b/packages/engine/src/rules/evaluator.ts @@ -1,6 +1,7 @@ import { readFileSync } from 'node:fs' import { resolve } from 'node:path' import { parse } from 'yaml' +import { assertWithinProject } from '../security/index.js' import { validateRules } from '@ruminaider/flowprint-schema' import type { ExecutionContext } from '../runner/types.js' import { evaluateExpression } from '../runner/evaluator.js' @@ -24,6 +25,7 @@ import type { * @returns Parsed and validated RulesDocument */ export function loadRulesFile(filePath: string, projectRoot: string): RulesDocument { + assertWithinProject(filePath, projectRoot) const absolutePath = resolve(projectRoot, filePath) let content: string @@ -36,7 
+38,7 @@ export function loadRulesFile(filePath: string, projectRoot: string): RulesDocum let doc: unknown try { - doc = parse(content) + doc = parse(content, { maxAliasCount: 100, schema: 'core' }) } catch (err: unknown) { const message = err instanceof Error ? err.message : String(err) throw new Error(`Failed to parse rules file "${filePath}": ${message}`) diff --git a/packages/engine/src/runner/loader.ts b/packages/engine/src/runner/loader.ts index c3de085..a551bd9 100644 --- a/packages/engine/src/runner/loader.ts +++ b/packages/engine/src/runner/loader.ts @@ -1,4 +1,5 @@ import { resolve } from 'node:path' +import { assertWithinProject } from '../security/index.js' /** * Dynamically import an entry point file and extract the named symbol. @@ -11,6 +12,7 @@ export async function loadEntryPoint( entry: { file: string; symbol: string }, projectRoot: string, ): Promise<(...args: unknown[]) => unknown> { + assertWithinProject(entry.file, projectRoot) const filePath = resolve(projectRoot, entry.file) let mod: Record diff --git a/packages/engine/src/runner/walker.ts b/packages/engine/src/runner/walker.ts index 7f1dc51..2843871 100644 --- a/packages/engine/src/runner/walker.ts +++ b/packages/engine/src/runner/walker.ts @@ -5,555 +5,480 @@ import type { ParallelNode, WaitNode, ErrorNode, - TerminalNode, TriggerNode, + TerminalNode, } from '@ruminaider/flowprint-schema' -import { - findRoots, - isActionNode, - isSwitchNode, - isParallelNode, - isWaitNode, - isErrorNode, - isTerminalNode, - isTriggerNode, -} from '@ruminaider/flowprint-schema' -import type { RunOptions, ExecutionContext, StepResult, ExecutionTrace } from './types.js' +import { isActionNode } from '@ruminaider/flowprint-schema' +import type { RunOptions, StepResult, ExecutionTrace } from './types.js' +import type { ExecutionContext } from '../walker/types.js' +import type { WalkGraphCallbacks } from '../walker/walk.js' +import { walkGraph } from '../walker/walk.js' import { evaluateExpression } from 
'./evaluator.js'
 import { loadEntryPoint } from './loader.js'
 import { loadRulesFile, evaluateRules } from '../rules/evaluator.js'
 
-interface CompensationEntry {
-  nodeId: string
-  entry: { file: string; symbol: string }
+/**
+ * Build an old-style ExecutionContext (Map-based) from the new walker context.
+ * Used to maintain compatibility with the evaluator and rules engine
+ * which expect { input, results: Map<string, unknown> }.
+ */
+function buildLegacyContext(ctx: ExecutionContext): {
+  input: unknown
+  results: Map<string, unknown>
+} {
+  const results = new Map<string, unknown>()
+  for (const [key, value] of Object.entries(ctx.state)) {
+    results.set(key, value)
+  }
+  return { input: ctx.input, results }
 }
 
 /**
- * Execute a flowprint document using an in-process graph walker.
+ * Execute a flowprint document using the generic graph walker.
  *
- * Walks the graph starting from root nodes, executing each node in sequence.
- * Action nodes call their entry_point functions, switch nodes evaluate
- * expressions to determine routing, parallel nodes execute branches
- * concurrently, and terminal nodes end execution.
+ * This is a thin adapter that wires the existing Node.js evaluator, loader,
+ * and rules engine into walkGraph's callback interface.
  */
 export async function runGraph(
   doc: FlowprintDocument,
   options: RunOptions,
 ): Promise<ExecutionTrace> {
   const startTime = performance.now()
+  const input =
+    options.input && typeof options.input === 'object' && !Array.isArray(options.input)
+      ? 
(options.input as Record) + : ({} as Record) + + // Externally tracked steps — survives even if walkGraph throws const steps: StepResult[] = [] - const context: ExecutionContext = { - input: options.input, - results: new Map(), + + const recordStep = (record: StepResult): void => { + steps.push(record) } - // Compensation stack for saga-style rollback (LIFO) - const compensationStack: CompensationEntry[] = [] + const callbacks: WalkGraphCallbacks = { + onAction: async (nodeId: string, node: ActionNode, ctx: ExecutionContext): Promise => { + const stepStart = performance.now() + + try { + // Rules-driven action + if (node.rules) { + if (node.rules.evaluator && node.rules.evaluator !== 'builtin') { + throw new Error( + `Action node "${nodeId}" uses unknown evaluator "${node.rules.evaluator}". Only "builtin" is supported.`, + ) + } + + const rulesDoc = loadRulesFile(node.rules.file, options.projectRoot) + const legacyCtx = buildLegacyContext(ctx) + const rulesResult = evaluateRules(rulesDoc, legacyCtx, options.expressionTimeout) + + // Store under nodeId for backward compat (evaluator references like "nodeId.field") + ctx.state[nodeId] = rulesResult.output + + recordStep({ + node_id: nodeId, + type: 'action', + status: 'completed', + duration_ms: Math.round(performance.now() - stepStart), + next: node.next, + }) + + return rulesResult.output + } - const roots = findRoots(doc) - if (roots.length === 0) { - return { - status: 'error', - duration_ms: Math.round(performance.now() - startTime), - steps, - error: 'No root nodes found in the document', - } - } + const entryPoint = node.entry_points?.[0] + if (!entryPoint) { + throw new Error(`Action node "${nodeId}" has no entry_point or rules defined`) + } - // Start walking from the first root node - let currentNodeId: string | undefined = roots[0] + const fn = await loadEntryPoint(entryPoint, options.projectRoot) + + // Evaluate input expressions if present + let args: unknown + if (node.inputs) { + const evaluated: 
Record = {} + const legacyCtx = buildLegacyContext(ctx) + for (const [key, expr] of Object.entries(node.inputs)) { + evaluated[key] = evaluateExpression(expr, legacyCtx, options.expressionTimeout) + } + args = evaluated + } else { + args = ctx.input + } - try { - while (currentNodeId) { - const node = doc.nodes[currentNodeId] - if (!node) { - throw new Error(`Node "${currentNodeId}" not found in document`) - } + const result = await fn(args) - if (isActionNode(node)) { - currentNodeId = await executeAction( - currentNodeId, - node, - context, - options, - steps, - compensationStack, - doc, - ) - } else if (isSwitchNode(node)) { - currentNodeId = executeSwitch(currentNodeId, node, context, options, steps) - } else if (isParallelNode(node)) { - currentNodeId = await executeParallel( - currentNodeId, - node, - context, - options, - steps, - compensationStack, - doc, - ) - } else if (isWaitNode(node)) { - currentNodeId = executeWait(currentNodeId, node, context, options, steps) - } else if (isErrorNode(node)) { - currentNodeId = await executeErrorHandler(currentNodeId, node, context, options, steps) - } else if (isTriggerNode(node)) { - currentNodeId = executeTrigger(currentNodeId, node, steps) - } else if (isTerminalNode(node)) { - executeTerminal(currentNodeId, node, steps) - currentNodeId = undefined - } else { - throw new Error(`Unknown node type for node "${currentNodeId}"`) - } - } + // Store under nodeId for backward compat (evaluator references like "nodeId.field") + ctx.state[nodeId] = result - // Determine final status from the last terminal step - const lastStep = steps[steps.length - 1] - const outcome = lastStep?.outcome - const status = outcome === 'failure' ? 
'failure' : 'success' + recordStep({ + node_id: nodeId, + type: 'action', + status: 'completed', + duration_ms: Math.round(performance.now() - stepStart), + next: node.next, + }) - // Collect output from the last result in context - const lastResultKey = [...context.results.keys()].pop() - const output = lastResultKey !== undefined ? context.results.get(lastResultKey) : undefined + return result + } catch (err: unknown) { + const errorMessage = err instanceof Error ? err.message : String(err) - return { - status, - duration_ms: Math.round(performance.now() - startTime), - steps, - output, - } - } catch (err: unknown) { - const errorMessage = err instanceof Error ? err.message : String(err) + recordStep({ + node_id: nodeId, + type: 'action', + status: 'error', + duration_ms: Math.round(performance.now() - stepStart), + error: errorMessage, + }) - // Execute compensation stack on error - await runCompensation(compensationStack, context, options, steps) + // Store error info in state so error handler can access it + ctx.state[nodeId] = { error: errorMessage } - return { - status: 'error', - duration_ms: Math.round(performance.now() - startTime), - steps, - error: errorMessage, - } - } -} + throw err + } + }, + + onSwitch: async ( + nodeId: string, + node: SwitchNode, + ctx: ExecutionContext, + ): Promise => { + const stepStart = performance.now() + const legacyCtx = buildLegacyContext(ctx) + + // Rules-driven switch + if (node.rules) { + if (node.rules.evaluator && node.rules.evaluator !== 'builtin') { + throw new Error( + `Switch node "${nodeId}" uses unknown evaluator "${node.rules.evaluator}". 
Only "builtin" is supported.`, + ) + } -async function executeAction( - nodeId: string, - node: ActionNode, - context: ExecutionContext, - options: RunOptions, - steps: StepResult[], - compensationStack: CompensationEntry[], - doc: FlowprintDocument, -): Promise { - const stepStart = performance.now() + const rulesDoc = loadRulesFile(node.rules.file, options.projectRoot) + const rulesResult = evaluateRules(rulesDoc, legacyCtx, options.expressionTimeout) + ctx.state[nodeId] = rulesResult.output + + const output = rulesResult.output as Record + const nextNode = output.next as string | undefined + + if (nextNode) { + recordStep({ + node_id: nodeId, + type: 'switch', + status: 'matched', + duration_ms: Math.round(performance.now() - stepStart), + next: nextNode, + }) + return nextNode + } - try { - // Rules-driven action: evaluate rules file instead of entry points - if (node.rules) { - if (node.rules.evaluator && node.rules.evaluator !== 'builtin') { - throw new Error( - `Action node "${nodeId}" uses unknown evaluator "${node.rules.evaluator}". Only "builtin" is supported.`, - ) + if (node.default) { + recordStep({ + node_id: nodeId, + type: 'switch', + status: 'default', + duration_ms: Math.round(performance.now() - stepStart), + next: node.default, + }) + return node.default + } + + recordStep({ + node_id: nodeId, + type: 'switch', + status: 'no-match', + duration_ms: Math.round(performance.now() - stepStart), + }) + return undefined + } + + // Evaluate cases top-to-bottom, follow first match + for (let i = 0; i < (node.cases?.length ?? 
0); i++) { + const c = node.cases?.[i] + if (!c) continue + + const result = evaluateExpression(c.when, legacyCtx, options.expressionTimeout) + + if (result) { + recordStep({ + node_id: nodeId, + type: 'switch', + status: 'matched', + duration_ms: Math.round(performance.now() - stepStart), + matched_case: i, + next: c.next, + }) + return c.next + } } - const rulesDoc = loadRulesFile(node.rules.file, options.projectRoot) - const rulesResult = evaluateRules(rulesDoc, context, options.expressionTimeout) - context.results.set(nodeId, rulesResult.output) + // Fall through to default + if (node.default) { + recordStep({ + node_id: nodeId, + type: 'switch', + status: 'default', + duration_ms: Math.round(performance.now() - stepStart), + next: node.default, + }) + return node.default + } - steps.push({ + // No match and no default + recordStep({ node_id: nodeId, - type: 'action', - status: 'completed', + type: 'switch', + status: 'no-match', duration_ms: Math.round(performance.now() - stepStart), - next: node.next, }) + return undefined + }, + + onParallel: async ( + nodeId: string, + node: ParallelNode, + ctx: ExecutionContext, + ): Promise => { + const stepStart = performance.now() + + const branchPromises = node.branches.map(async (branchId) => { + const branchNode = doc.nodes[branchId] + if (!branchNode) { + throw new Error(`Parallel branch node "${branchId}" not found`) + } - return node.next - } + if (isActionNode(branchNode)) { + const entryPoint = branchNode.entry_points?.[0] + if (!entryPoint) { + throw new Error(`Action node "${branchId}" has no entry_point defined`) + } + + const fn = await loadEntryPoint(entryPoint, options.projectRoot) + + let args: unknown + if (branchNode.inputs) { + const evaluated: Record = {} + const legacyCtx = buildLegacyContext(ctx) + for (const [key, expr] of Object.entries(branchNode.inputs)) { + evaluated[key] = evaluateExpression(expr, legacyCtx, options.expressionTimeout) + } + args = evaluated + } else { + args = ctx.input + } + 
+ const result = await fn(args) + ctx.state[branchId] = result + + recordStep({ + node_id: branchId, + type: 'action', + status: 'completed', + }) + + return { branchId, result } + } - const entryPoint = node.entry_points?.[0] - if (!entryPoint) { - throw new Error(`Action node "${nodeId}" has no entry_point or rules defined`) - } + throw new Error( + `Parallel branch "${branchId}" is not an action node (type: ${branchNode.type})`, + ) + }) - const fn = await loadEntryPoint(entryPoint, options.projectRoot) + const strategy = node.join_strategy ?? 'all' + let parallelResult: unknown - // Evaluate input expressions if present - let args: unknown - if (node.inputs) { - const evaluated: Record = {} - for (const [key, expr] of Object.entries(node.inputs)) { - evaluated[key] = evaluateExpression(expr, context, options.expressionTimeout) + if (strategy === 'first') { + const first = await Promise.race(branchPromises) + ctx.state[nodeId] = first.result + parallelResult = { [nodeId]: first.result } + } else { + const results = await Promise.all(branchPromises) + const resultMap: Record = {} + for (const r of results) { + resultMap[r.branchId] = r.result + } + ctx.state[nodeId] = resultMap + parallelResult = { [nodeId]: resultMap } } - args = evaluated - } else { - args = context.input - } - - const result = await fn(args) - context.results.set(nodeId, result) - // Track compensation if available - if (node.compensation) { - compensationStack.push({ - nodeId, - entry: node.compensation, + recordStep({ + node_id: nodeId, + type: 'parallel', + status: 'completed', + duration_ms: Math.round(performance.now() - stepStart), + next: node.join, }) - } - - steps.push({ - node_id: nodeId, - type: 'action', - status: 'completed', - duration_ms: Math.round(performance.now() - stepStart), - next: node.next, - }) - - return node.next - } catch (err: unknown) { - const errorMessage = err instanceof Error ? 
err.message : String(err) - steps.push({ - node_id: nodeId, - type: 'action', - status: 'error', - duration_ms: Math.round(performance.now() - stepStart), - error: errorMessage, - }) - - // Route to error handler if defined - if (node.error?.catch) { - const errorNodeId = node.error.catch - const errorNode = doc.nodes[errorNodeId] - if (errorNode && isErrorNode(errorNode)) { - // Store the error info so the error handler can access it - context.results.set(nodeId, { error: errorMessage }) - return errorNodeId + return parallelResult + }, + + onWait: async (nodeId: string, node: WaitNode, ctx: ExecutionContext): Promise => { + const stepStart = performance.now() + const fixtureData = options.fixtures?.[nodeId] + + if (fixtureData !== undefined) { + ctx.state[nodeId] = fixtureData + recordStep({ + node_id: nodeId, + type: 'wait', + status: 'fixture', + duration_ms: Math.round(performance.now() - stepStart), + next: node.next, + }) + return fixtureData } - } - - // No error handler — re-throw - throw err - } -} - -function executeSwitch( - nodeId: string, - node: SwitchNode, - context: ExecutionContext, - options: RunOptions, - steps: StepResult[], -): string | undefined { - const stepStart = performance.now() - - // Rules-driven switch: evaluate rules file for routing - if (node.rules) { - if (node.rules.evaluator && node.rules.evaluator !== 'builtin') { - throw new Error( - `Switch node "${nodeId}" uses unknown evaluator "${node.rules.evaluator}". 
Only "builtin" is supported.`, - ) - } - - const rulesDoc = loadRulesFile(node.rules.file, options.projectRoot) - const rulesResult = evaluateRules(rulesDoc, context, options.expressionTimeout) - context.results.set(nodeId, rulesResult.output) - // Route via `then.next` from matching rule - const output = rulesResult.output as Record - const nextNode = output.next as string | undefined + // No fixture — if timeout_next is defined, route there as a timeout + if (node.timeout_next) { + ctx.state[nodeId] = undefined + recordStep({ + node_id: nodeId, + type: 'wait', + status: 'timeout', + duration_ms: Math.round(performance.now() - stepStart), + next: node.timeout_next, + error: `No fixture data for wait node "${nodeId}" (event: ${node.event}). Routing to timeout_next.`, + }) + return undefined + } - if (nextNode) { - steps.push({ + // No fixture, no timeout_next — skip with warning + ctx.state[nodeId] = undefined + recordStep({ node_id: nodeId, - type: 'switch', - status: 'matched', + type: 'wait', + status: 'skipped', duration_ms: Math.round(performance.now() - stepStart), - next: nextNode, + next: node.next, + error: `No fixture data for wait node "${nodeId}" (event: ${node.event}). 
Use --fixtures to provide signal data.`, }) - return nextNode - } + return undefined + }, + + onError: async ( + nodeId: string, + node: ErrorNode, + ctx: ExecutionContext, + ): Promise => { + const stepStart = performance.now() + + if (node.entry_points?.[0]) { + const fn = await loadEntryPoint(node.entry_points[0], options.projectRoot) + const result = await fn(ctx.input) + ctx.state[nodeId] = result + } - // No `next` in output — fall through to default - if (node.default) { - steps.push({ + recordStep({ node_id: nodeId, - type: 'switch', - status: 'default', + type: 'error', + status: 'handled', duration_ms: Math.round(performance.now() - stepStart), - next: node.default, + next: node.next, }) - return node.default - } - - steps.push({ - node_id: nodeId, - type: 'switch', - status: 'no-match', - duration_ms: Math.round(performance.now() - stepStart), - }) - return undefined - } - // Evaluate cases top-to-bottom, follow first match - for (let i = 0; i < (node.cases?.length ?? 0); i++) { - const c = node.cases?.[i] - if (!c) continue - - const result = evaluateExpression(c.when, context, options.expressionTimeout) - - if (result) { - steps.push({ + return node.next + }, + + onTrigger: async ( + nodeId: string, + node: TriggerNode, + _ctx: ExecutionContext, + ): Promise => { + recordStep({ node_id: nodeId, - type: 'switch', - status: 'matched', - duration_ms: Math.round(performance.now() - stepStart), - matched_case: i, - next: c.next, + type: 'trigger', + status: 'fired', + next: node.next as string | undefined, }) - return c.next - } - } - - // Fall through to default - if (node.default) { - steps.push({ - node_id: nodeId, - type: 'switch', - status: 'default', - duration_ms: Math.round(performance.now() - stepStart), - next: node.default, - }) - return node.default - } - // No match and no default - steps.push({ - node_id: nodeId, - type: 'switch', - status: 'no-match', - duration_ms: Math.round(performance.now() - stepStart), - }) - return undefined -} + 
return node.next as string | undefined + }, -async function executeParallel( - nodeId: string, - node: ParallelNode, - context: ExecutionContext, - options: RunOptions, - steps: StepResult[], - compensationStack: CompensationEntry[], - doc: FlowprintDocument, -): Promise { - const stepStart = performance.now() - - const branchPromises = node.branches.map(async (branchId) => { - const branchNode = doc.nodes[branchId] - if (!branchNode) { - throw new Error(`Parallel branch node "${branchId}" not found`) - } - - if (isActionNode(branchNode)) { - const entryPoint = branchNode.entry_points?.[0] - if (!entryPoint) { - throw new Error(`Action node "${branchId}" has no entry_point defined`) + onTerminal: async ( + nodeId: string, + node: TerminalNode, + _ctx: ExecutionContext, + ): Promise => { + recordStep({ + node_id: nodeId, + type: 'terminal', + status: 'reached', + outcome: node.outcome, + }) + }, + + onStep: (_record: StepResult): void => { + // No-op: step recording is handled by the closure-captured recordStep + }, + + onCompensation: ( + _nodeId: string, + compensation: { file: string; symbol: string }, + result: unknown, + ): (() => Promise) => { + return async () => { + const fn = await loadEntryPoint(compensation, options.projectRoot) + await fn(result) } - - const fn = await loadEntryPoint(entryPoint, options.projectRoot) - - let args: unknown - if (branchNode.inputs) { - const evaluated: Record = {} - for (const [key, expr] of Object.entries(branchNode.inputs)) { - evaluated[key] = evaluateExpression(expr, context, options.expressionTimeout) - } - args = evaluated + }, + + onCompensationStep: (nodeId: string, error?: Error): void => { + if (error) { + recordStep({ + node_id: `${nodeId}:compensate`, + type: 'compensation', + status: 'error', + duration_ms: 0, + error: error.message, + }) } else { - args = context.input - } - - const result = await fn(args) - context.results.set(branchId, result) - - if (branchNode.compensation) { - compensationStack.push({ - 
nodeId: branchId, - entry: branchNode.compensation, + recordStep({ + node_id: `${nodeId}:compensate`, + type: 'compensation', + status: 'completed', + duration_ms: 0, }) } + }, - steps.push({ - node_id: branchId, - type: 'action', - status: 'completed', - }) - - return { branchId, result } - } - - throw new Error( - `Parallel branch "${branchId}" is not an action node (type: ${branchNode.type})`, - ) - }) - - const strategy = node.join_strategy ?? 'all' - - if (strategy === 'first') { - const first = await Promise.race(branchPromises) - context.results.set(nodeId, first.result) - } else { - // 'all' — wait for all branches - const results = await Promise.all(branchPromises) - const resultMap: Record = {} - for (const r of results) { - resultMap[r.branchId] = r.result - } - context.results.set(nodeId, resultMap) - } - - steps.push({ - node_id: nodeId, - type: 'parallel', - status: 'completed', - duration_ms: Math.round(performance.now() - stepStart), - next: node.join, - }) - - return node.join -} - -function executeWait( - nodeId: string, - node: WaitNode, - context: ExecutionContext, - options: RunOptions, - steps: StepResult[], -): string | undefined { - const stepStart = performance.now() - - const fixtureData = options.fixtures?.[nodeId] - - if (fixtureData !== undefined) { - context.results.set(nodeId, fixtureData) - steps.push({ - node_id: nodeId, - type: 'wait', - status: 'fixture', - duration_ms: Math.round(performance.now() - stepStart), - next: node.next, - }) - return node.next - } - - // No fixture — if timeout_next is defined, route there as a timeout - if (node.timeout_next) { - steps.push({ - node_id: nodeId, - type: 'wait', - status: 'timeout', - duration_ms: Math.round(performance.now() - stepStart), - next: node.timeout_next, - error: `No fixture data for wait node "${nodeId}" (event: ${node.event}). 
Routing to timeout_next.`, - }) - context.results.set(nodeId, undefined) - return node.timeout_next + resolveWaitNext: (nodeId: string, node: WaitNode, _result: unknown): string | undefined => { + const fixtureData = options.fixtures?.[nodeId] + if (fixtureData !== undefined) { + return node.next + } + if (node.timeout_next) { + return node.timeout_next + } + return node.next + }, } - // No fixture, no timeout_next — skip with warning - steps.push({ - node_id: nodeId, - type: 'wait', - status: 'skipped', - duration_ms: Math.round(performance.now() - stepStart), - next: node.next, - error: `No fixture data for wait node "${nodeId}" (event: ${node.event}). Use --fixtures to provide signal data.`, - }) - context.results.set(nodeId, undefined) - return node.next -} - -async function executeErrorHandler( - nodeId: string, - node: ErrorNode, - context: ExecutionContext, - options: RunOptions, - steps: StepResult[], -): Promise { - const stepStart = performance.now() - - if (node.entry_points?.[0]) { - const fn = await loadEntryPoint(node.entry_points[0], options.projectRoot) - const result = await fn(context.input) - context.results.set(nodeId, result) - } + try { + const result = await walkGraph(doc, input, callbacks) - steps.push({ - node_id: nodeId, - type: 'error', - status: 'handled', - duration_ms: Math.round(performance.now() - stepStart), - next: node.next, - }) + // Determine final status from the last step + const lastStep = steps[steps.length - 1] + const lastOutcome = lastStep?.outcome + const status = result.outcome === 'failure' || lastOutcome === 'failure' ? 'failure' : 'success' - return node.next -} + // Collect output from the accumulated state + const output = Object.keys(result.output).length > 0 ? 
result.output : undefined -function executeTrigger( - nodeId: string, - node: TriggerNode, - steps: StepResult[], -): string | undefined { - steps.push({ - node_id: nodeId, - type: 'trigger', - status: 'fired', - next: node.next as string | undefined, - }) - - return node.next as string | undefined -} - -function executeTerminal(nodeId: string, node: TerminalNode, steps: StepResult[]): void { - steps.push({ - node_id: nodeId, - type: 'terminal', - status: 'reached', - outcome: node.outcome, - }) -} + return { + status, + duration_ms: Math.round(performance.now() - startTime), + steps, + output, + } + } catch (err: unknown) { + const errorMessage = err instanceof Error ? err.message : String(err) -async function runCompensation( - compensationStack: CompensationEntry[], - context: ExecutionContext, - options: RunOptions, - steps: StepResult[], -): Promise { - // Execute compensation stack in LIFO order - while (compensationStack.length > 0) { - const comp = compensationStack.pop() - if (!comp) break - - const stepStart = performance.now() - try { - const fn = await loadEntryPoint(comp.entry, options.projectRoot) - const result = context.results.get(comp.nodeId) - await fn(result) - - steps.push({ - node_id: `${comp.nodeId}:compensate`, - type: 'compensation', - status: 'completed', - duration_ms: Math.round(performance.now() - stepStart), - }) - } catch (err: unknown) { - const message = err instanceof Error ? 
err.message : String(err) - steps.push({ - node_id: `${comp.nodeId}:compensate`, - type: 'compensation', - status: 'error', - duration_ms: Math.round(performance.now() - stepStart), - error: message, - }) + return { + status: 'error', + duration_ms: Math.round(performance.now() - startTime), + steps, + error: errorMessage, } } } diff --git a/packages/engine/src/security/index.ts b/packages/engine/src/security/index.ts new file mode 100644 index 0000000..4547fb8 --- /dev/null +++ b/packages/engine/src/security/index.ts @@ -0,0 +1 @@ +export { assertWithinProject } from './path-containment.js' diff --git a/packages/engine/src/security/path-containment.ts b/packages/engine/src/security/path-containment.ts new file mode 100644 index 0000000..4449781 --- /dev/null +++ b/packages/engine/src/security/path-containment.ts @@ -0,0 +1,16 @@ +import { resolve, sep, relative } from 'node:path' + +/** + * Assert that a file path resolves within the project root directory. + * Prevents path traversal attacks (e.g. `../../etc/passwd`). 
+ * + * @param filePath - The file path to validate (absolute or relative) + * @param projectRoot - The root directory boundary + * @throws Error if the resolved path is outside the project root + */ +export function assertWithinProject(filePath: string, projectRoot: string): void { + const resolved = resolve(projectRoot, filePath) + if (!resolved.startsWith(projectRoot + sep) && resolved !== projectRoot) { + throw new Error(`Path "${relative(projectRoot, resolved)}" resolves outside project root`) + } +} diff --git a/packages/engine/src/walker/index.ts b/packages/engine/src/walker/index.ts index e86c2e4..83b8391 100644 --- a/packages/engine/src/walker/index.ts +++ b/packages/engine/src/walker/index.ts @@ -5,3 +5,6 @@ export type { WalkOptions, WalkResult, } from './types.js' + +export { walkGraph } from './walk.js' +export type { WalkGraphCallbacks, CompensationEntry } from './walk.js' diff --git a/packages/engine/src/walker/walk.ts b/packages/engine/src/walker/walk.ts new file mode 100644 index 0000000..60c1796 --- /dev/null +++ b/packages/engine/src/walker/walk.ts @@ -0,0 +1,229 @@ +import type { FlowprintDocument, WaitNode } from '@ruminaider/flowprint-schema' +import { + findRoots, + isActionNode, + isSwitchNode, + isParallelNode, + isWaitNode, + isErrorNode, + isTerminalNode, + isTriggerNode, +} from '@ruminaider/flowprint-schema' +import type { + ExecutionContext, + WalkerCallbacks, + WalkOptions, + WalkResult, + NodeExecutionRecord, +} from './types.js' + +/** + * Compensation entry pushed after a successful action with a `compensation` field. + * The handler is provided by the consumer and called during LIFO unwind. + */ +export interface CompensationEntry { + nodeId: string + handler: () => Promise +} + +/** + * Extended callbacks for walkGraph beyond the base WalkerCallbacks. + * These hooks give the consumer control over compensation and wait routing. 
+ */ +export interface WalkGraphCallbacks extends WalkerCallbacks { + /** + * Called when an action node with a `compensation` field completes successfully. + * Should return a handler function that performs the compensation. + */ + onCompensation?( + nodeId: string, + compensation: { file: string; symbol: string }, + result: unknown, + ): () => Promise + + /** + * Called during compensation unwind for each handler (success or failure). + * Used for trace recording of compensation steps. + */ + onCompensationStep?(nodeId: string, error?: Error): void + + /** + * Determines the next node for a wait node. + * Can return node.timeout_next for timeout scenarios. + * Defaults to node.next if not provided. + */ + resolveWaitNext?(nodeId: string, node: WaitNode, result: unknown): string | undefined +} + +/** + * Generic, callback-driven graph walker. + * + * Owns traversal logic (chain-following via `next` pointers), context management + * (flat-merge after each node), compensation stack (LIFO), and AbortSignal checking. + * Delegates node-type-specific behavior to `callbacks`. + * + * The `callbacks.onStep` function is the step recorder. Each callback implementation + * (onAction, onSwitch, etc.) should call `onStep(record)` to record its step. + * walkGraph intercepts these calls and collects them into `WalkResult.trace`. + * + * Key behaviors centralized here: + * 1. Root node finding via findRoots() + * 2. Chain-following loop (follow `next` pointers, NOT topological) + * 3. Context management (fresh ExecutionContext, flat-merge after each node) + * 4. AbortSignal checking before each node + * 5. Compensation stack management (LIFO, best-effort) + * 6. Action error -> error node routing + * 7. Terminal handling (outcome capture) + */ +export async function walkGraph( + doc: FlowprintDocument, + input: Record, + callbacks: WalkGraphCallbacks, + options?: WalkOptions, +): Promise> { + const signal = options?.abortController?.signal ?? 
new AbortController().signal + const trace: TStep[] = [] + const compensationStack: CompensationEntry[] = [] + const state: Record = {} + + // Wrap onStep to capture records into the trace array + const userOnStep = callbacks.onStep.bind(callbacks) + callbacks.onStep = (record: TStep): void => { + trace.push(record) + userOnStep(record) + } + + // Find root nodes (nodes with no incoming edges) + const roots = findRoots(doc) + if (roots.length === 0) { + throw new Error('No root nodes found in the document') + } + + let currentNodeId: string | undefined = roots[0] + let outcome: 'success' | 'failure' | undefined + + const makeCtx = (nodeId: string, nodeType: string, lane: string): ExecutionContext => ({ + input, + state, + node: { id: nodeId, type: nodeType, lane }, + signal, + }) + + try { + while (currentNodeId) { + // Check abort before each node + if (signal.aborted) { + break + } + + const node = doc.nodes[currentNodeId] + if (!node) { + throw new Error(`Node "${currentNodeId}" not found in document`) + } + + const ctx = makeCtx(currentNodeId, node.type, node.lane) + + if (isTriggerNode(node)) { + const nextFromCallback = await callbacks.onTrigger(currentNodeId, node, ctx) + currentNodeId = nextFromCallback ?? 
(node.next as string | undefined) + } else if (isActionNode(node)) { + try { + const result = await callbacks.onAction(currentNodeId, node, ctx) + mergeOutput(state, result) + + // Track compensation if available + if (node.compensation && callbacks.onCompensation) { + compensationStack.push({ + nodeId: currentNodeId, + handler: callbacks.onCompensation(currentNodeId, node.compensation, result), + }) + } + + currentNodeId = node.next + } catch (err: unknown) { + // Route to error handler if defined on the action node + if (node.error?.catch) { + const errorNodeId = node.error.catch + const errorNode = doc.nodes[errorNodeId] + if (errorNode && isErrorNode(errorNode)) { + currentNodeId = errorNodeId + continue + } + } + // No error handler — re-throw to outer catch + throw err + } + } else if (isSwitchNode(node)) { + const nextNodeId = await callbacks.onSwitch(currentNodeId, node, ctx) + currentNodeId = nextNodeId + } else if (isParallelNode(node)) { + const result = await callbacks.onParallel(currentNodeId, node, ctx) + mergeOutput(state, result) + currentNodeId = node.join + } else if (isWaitNode(node)) { + const result = await callbacks.onWait(currentNodeId, node, ctx) + mergeOutput(state, result) + + if (callbacks.resolveWaitNext) { + currentNodeId = callbacks.resolveWaitNext(currentNodeId, node, result) + } else { + currentNodeId = node.next + } + } else if (isErrorNode(node)) { + const nextFromCallback = await callbacks.onError(currentNodeId, node, ctx) + currentNodeId = nextFromCallback ?? 
node.next + } else if (isTerminalNode(node)) { + outcome = node.outcome as 'success' | 'failure' | undefined + if (callbacks.onTerminal) { + await callbacks.onTerminal(currentNodeId, node, ctx) + } + currentNodeId = undefined + } else { + throw new Error(`Unknown node type for node "${currentNodeId}"`) + } + } + } catch (err: unknown) { + // Execute compensation stack in LIFO order (best-effort) + await runCompensationStack(compensationStack, callbacks) + throw err + } + + return { + output: { ...state }, + trace, + outcome, + } +} + +/** + * Flat-merge a node's output into the accumulated state. + * Only merges plain objects; arrays and primitives are ignored. + */ +function mergeOutput(state: Record, result: unknown): void { + if (result && typeof result === 'object' && !Array.isArray(result)) { + Object.assign(state, result) + } +} + +/** + * Execute compensation handlers in LIFO order. + * Best-effort: continues even if individual handlers fail. + */ +async function runCompensationStack( + stack: CompensationEntry[], + callbacks: WalkGraphCallbacks, +): Promise { + while (stack.length > 0) { + const entry = stack.pop() + if (!entry) break + try { + await entry.handler() + callbacks.onCompensationStep?.(entry.nodeId) + } catch (err: unknown) { + callbacks.onCompensationStep?.( + entry.nodeId, + err instanceof Error ? 
err : new Error(String(err)), + ) + } + } +} diff --git a/packages/schema/flowprint.schema.json b/packages/schema/flowprint.schema.json index 8267990..66f6580 100644 --- a/packages/schema/flowprint.schema.json +++ b/packages/schema/flowprint.schema.json @@ -114,6 +114,14 @@ "minimum": 0, "description": "Display order (0 = topmost lane)" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "height": { "type": "number", "minimum": 140, @@ -201,6 +209,14 @@ "type": "string", "description": "Markdown notes for documentation and design rationale" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "metadata": { "type": "object", "additionalProperties": { @@ -216,6 +232,13 @@ "rules": { "$ref": "#/definitions/RulesRef" }, + "expressions": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Key-value pairs of output field name to expression string. Makes this an engine-native transform node." 
+ }, "position": { "$ref": "#/definitions/Position" }, @@ -267,6 +290,14 @@ "type": "string", "description": "Markdown notes for documentation and design rationale" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "metadata": { "type": "object", "additionalProperties": { @@ -333,6 +364,14 @@ "type": "string", "description": "Markdown notes for documentation and design rationale" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "metadata": { "type": "object", "additionalProperties": { @@ -388,6 +427,14 @@ "type": "string", "description": "Markdown notes for documentation and design rationale" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "metadata": { "type": "object", "additionalProperties": { @@ -458,6 +505,14 @@ "type": "string", "description": "Markdown notes for documentation and design rationale" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "metadata": { "type": "object", "additionalProperties": { @@ -500,6 +555,14 @@ "type": "string", "description": "Markdown notes for documentation and design rationale" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "metadata": { "type": "object", "additionalProperties": { @@ -540,6 +603,14 @@ "type": 
"string", "description": "Markdown notes for documentation and design rationale" }, + "data_class": { + "type": "array", + "items": { + "type": "string", + "enum": ["pii", "financial", "credentials", "internal"] + }, + "description": "Data classification labels for trace redaction policy" + }, "metadata": { "type": "object", "additionalProperties": { diff --git a/packages/schema/src/__tests__/data-class-expressions.test.ts b/packages/schema/src/__tests__/data-class-expressions.test.ts new file mode 100644 index 0000000..f79bf3a --- /dev/null +++ b/packages/schema/src/__tests__/data-class-expressions.test.ts @@ -0,0 +1,601 @@ +import { describe, it, expect } from 'vitest' +import { parse } from 'yaml' +import { validate } from '../validate.js' +import { serialize } from '../serialize.js' +import type { FlowprintDocument } from '../types.js' + +/** + * Helper to create a minimal valid FlowprintDocument. + */ +function makeDoc(overrides: Partial = {}): FlowprintDocument { + return { + schema: 'flowprint/1.0', + name: 'test-blueprint', + version: '1.0.0', + lanes: { + main: { label: 'Main', visibility: 'internal', order: 0 }, + }, + nodes: { + start: { type: 'action', lane: 'main', label: 'Start', next: 'done' }, + done: { type: 'terminal', lane: 'main', label: 'Done', outcome: 'success' }, + }, + ...overrides, + } +} + +// --------------------------------------------------------------------------- +// data_class on lanes +// --------------------------------------------------------------------------- + +describe('data_class on lanes', () => { + it('accepts a lane with data_class', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { + main: { label: 'Main', visibility: 'external', order: 0, data_class: ['pii', 'financial'] }, + }, + nodes: { + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + 
it('accepts a lane without data_class (backward compat)', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { + main: { label: 'Main', visibility: 'external', order: 0 }, + }, + nodes: { + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + it('rejects invalid data_class value on lane', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { + main: { + label: 'Main', + visibility: 'external', + order: 0, + data_class: ['invalid_class'], + }, + }, + nodes: { + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(false) + expect(result.errors.length).toBeGreaterThan(0) + }) +}) + +// --------------------------------------------------------------------------- +// data_class on nodes +// --------------------------------------------------------------------------- + +describe('data_class on nodes', () => { + it('accepts an action node with data_class', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + data_class: ['credentials'], + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + it('accepts a switch node with data_class', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + decision: { + type: 'switch', + lane: 'main', + label: 'Decision', + data_class: ['pii'], + cases: [{ when: 'yes', next: 'end' }], + }, + end: { type: 'terminal', 
lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + it('accepts a terminal node with data_class', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + end: { + type: 'terminal', + lane: 'main', + label: 'End', + data_class: ['internal'], + outcome: 'success', + }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + it('rejects invalid data_class value on node', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + data_class: ['invalid_class'], + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(false) + expect(result.errors.length).toBeGreaterThan(0) + }) + + it('accepts multiple valid data_class values on a node', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + data_class: ['pii', 'financial', 'credentials', 'internal'], + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) +}) + +// --------------------------------------------------------------------------- +// expressions field on action nodes +// --------------------------------------------------------------------------- + +describe('expressions field', () => { + it('accepts an action node with only expressions', () => { + const result = validate({ + schema: 
'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + transform: { + type: 'action', + lane: 'main', + label: 'Transform', + expressions: { total: 'price * quantity', tax: 'total * 0.1' }, + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + it('accepts an action node with only rules', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + rules: { file: 'rules/pricing.rules.yaml' }, + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + it('accepts an action node with only entry_points', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + entry_points: [{ file: 'src/handler.ts', symbol: 'handle' }], + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + expect(result.errors).toEqual([]) + }) + + it('accepts an action node with none of expressions, rules, or entry_points', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(true) + 
expect(result.errors).toEqual([]) + }) +}) + +// --------------------------------------------------------------------------- +// Mutual exclusivity: expressions vs rules vs entry_points +// --------------------------------------------------------------------------- + +describe('mutual exclusivity', () => { + it('rejects action node with both expressions and rules', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + expressions: { total: 'price * quantity' }, + rules: { file: 'rules/pricing.rules.yaml' }, + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(false) + expect( + result.errors.some( + (e) => + e.path === '/nodes/step' && + e.message.includes('expressions') && + e.message.includes('rules'), + ), + ).toBe(true) + }) + + it('rejects action node with both expressions and entry_points', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + expressions: { total: 'price * quantity' }, + entry_points: [{ file: 'src/handler.ts', symbol: 'handle' }], + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(false) + expect( + result.errors.some( + (e) => + e.path === '/nodes/step' && + e.message.includes('expressions') && + e.message.includes('entry_points'), + ), + ).toBe(true) + }) + + it('rejects action node with both rules and entry_points', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + 
step: { + type: 'action', + lane: 'main', + label: 'Step', + rules: { file: 'rules/pricing.rules.yaml' }, + entry_points: [{ file: 'src/handler.ts', symbol: 'handle' }], + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(false) + expect( + result.errors.some( + (e) => + e.path === '/nodes/step' && + e.message.includes('rules') && + e.message.includes('entry_points'), + ), + ).toBe(true) + }) + + it('rejects action node with all three: expressions, rules, and entry_points', () => { + const result = validate({ + schema: 'flowprint/1.0', + name: 'test', + version: '1.0.0', + lanes: { main: { label: 'Main', visibility: 'external', order: 0 } }, + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + expressions: { total: 'price * quantity' }, + rules: { file: 'rules/pricing.rules.yaml' }, + entry_points: [{ file: 'src/handler.ts', symbol: 'handle' }], + next: 'end', + }, + end: { type: 'terminal', lane: 'main', label: 'End', outcome: 'success' }, + }, + }) + expect(result.valid).toBe(false) + // Should have multiple mutual exclusivity errors + const mutualErrors = result.errors.filter( + (e) => e.path === '/nodes/step' && e.severity === 'error', + ) + expect(mutualErrors.length).toBeGreaterThanOrEqual(3) + }) +}) + +// --------------------------------------------------------------------------- +// Serialization: data_class and expressions +// --------------------------------------------------------------------------- + +describe('serialization', () => { + it('round-trips document with data_class on lanes', () => { + const doc = makeDoc({ + lanes: { + main: { + label: 'Main', + visibility: 'internal', + order: 0, + data_class: ['pii', 'financial'], + }, + }, + }) + const yaml = serialize(doc) + const parsed = parse(yaml) as FlowprintDocument + expect(parsed.lanes.main?.data_class).toEqual(['pii', 'financial']) + }) + + it('round-trips document with data_class on nodes', 
() => { + const doc = makeDoc({ + nodes: { + step: { + type: 'action', + lane: 'main', + label: 'Step', + data_class: ['credentials'], + next: 'done', + }, + done: { type: 'terminal', lane: 'main', label: 'Done', outcome: 'success' }, + }, + }) + const yaml = serialize(doc) + const parsed = parse(yaml) as FlowprintDocument + const node = parsed.nodes.step as { data_class?: string[] } + expect(node.data_class).toEqual(['credentials']) + }) + + it('round-trips document with expressions on action node', () => { + const doc = makeDoc({ + nodes: { + transform: { + type: 'action', + lane: 'main', + label: 'Transform', + expressions: { total: 'price * quantity', tax: 'total * 0.1' }, + next: 'done', + }, + done: { type: 'terminal', lane: 'main', label: 'Done', outcome: 'success' }, + }, + }) + const yaml = serialize(doc) + const parsed = parse(yaml) as FlowprintDocument + const node = parsed.nodes.transform as { expressions?: Record<string, string> } + expect(node.expressions).toEqual({ total: 'price * quantity', tax: 'total * 0.1' }) + }) + + it('round-trips document with data_class and expressions together', () => { + const doc = makeDoc({ + lanes: { + main: { + label: 'Main', + visibility: 'internal', + order: 0, + data_class: ['financial'], + }, + }, + nodes: { + transform: { + type: 'action', + lane: 'main', + label: 'Transform', + data_class: ['pii'], + expressions: { total: 'price * quantity' }, + next: 'done', + }, + done: { type: 'terminal', lane: 'main', label: 'Done', outcome: 'success' }, + }, + }) + const yaml = serialize(doc) + const parsed = parse(yaml) as FlowprintDocument + expect(parsed).toEqual(doc) + }) + + describe('key ordering', () => { + it('outputs data_class before metadata in node key order', () => { + const doc = makeDoc({ + nodes: { + my_node: { + type: 'action', + lane: 'main', + label: 'My Node', + description: 'Does things', + data_class: ['pii'], + metadata: { sla: '5m' }, + next: 'done', + }, + done: { type: 'terminal', lane: 'main', label: 'Done',
outcome: 'success' }, + }, + }) + const yaml = serialize(doc) + + const lines = yaml.split('\n') + const nodeStart = lines.findIndex((l) => l.trimStart().startsWith('my_node:')) + expect(nodeStart).toBeGreaterThan(-1) + + const nodeKeys: string[] = [] + for (let i = nodeStart + 1; i < lines.length; i++) { + const line = lines[i] ?? '' + if (/^ {2}\S/.exec(line) || /^\S/.exec(line)) break + const keyMatch = /^ {4}(\w+):/.exec(line) + if (keyMatch) { + nodeKeys.push(keyMatch[1] ?? '') + } + } + + const dcIdx = nodeKeys.indexOf('data_class') + const mdIdx = nodeKeys.indexOf('metadata') + expect(dcIdx).toBeGreaterThan(-1) + expect(mdIdx).toBeGreaterThan(-1) + expect(dcIdx).toBeLessThan(mdIdx) + }) + + it('outputs expressions after entry_points in action node key order', () => { + const doc = makeDoc({ + nodes: { + my_node: { + type: 'action', + lane: 'main', + label: 'My Node', + expressions: { total: 'price * quantity' }, + next: 'done', + }, + done: { type: 'terminal', lane: 'main', label: 'Done', outcome: 'success' }, + }, + }) + const yaml = serialize(doc) + + const lines = yaml.split('\n') + const nodeStart = lines.findIndex((l) => l.trimStart().startsWith('my_node:')) + expect(nodeStart).toBeGreaterThan(-1) + + const nodeKeys: string[] = [] + for (let i = nodeStart + 1; i < lines.length; i++) { + const line = lines[i] ?? '' + if (/^ {2}\S/.exec(line) || /^\S/.exec(line)) break + const keyMatch = /^ {4}(\w+):/.exec(line) + if (keyMatch) { + nodeKeys.push(keyMatch[1] ?? 
'') + } + } + + // expressions should appear before next but after type-specific prefix fields + const exprIdx = nodeKeys.indexOf('expressions') + const nextIdx = nodeKeys.indexOf('next') + expect(exprIdx).toBeGreaterThan(-1) + expect(nextIdx).toBeGreaterThan(-1) + expect(exprIdx).toBeLessThan(nextIdx) + }) + + it('outputs data_class in correct position in lane key order', () => { + const doc = makeDoc({ + lanes: { + main: { + label: 'Main', + visibility: 'internal', + order: 0, + data_class: ['pii'], + }, + }, + }) + const yaml = serialize(doc) + + const lines = yaml.split('\n') + const laneStart = lines.findIndex((l) => l.trimStart().startsWith('main:')) + expect(laneStart).toBeGreaterThan(-1) + + const laneKeys: string[] = [] + for (let i = laneStart + 1; i < lines.length; i++) { + const line = lines[i] ?? '' + // Stop if we hit a same-level or top-level key + if (/^ {2}\S/.exec(line) || /^\S/.exec(line)) break + const keyMatch = /^ {4}(\w+):/.exec(line) + if (keyMatch) { + laneKeys.push(keyMatch[1] ?? '') + } + } + + const orderIdx = laneKeys.indexOf('order') + const dcIdx = laneKeys.indexOf('data_class') + expect(orderIdx).toBeGreaterThan(-1) + expect(dcIdx).toBeGreaterThan(-1) + expect(dcIdx).toBeGreaterThan(orderIdx) + }) + }) +}) diff --git a/packages/schema/src/serialize.ts b/packages/schema/src/serialize.ts index 91f81eb..a596855 100644 --- a/packages/schema/src/serialize.ts +++ b/packages/schema/src/serialize.ts @@ -25,6 +25,7 @@ const NODE_KEY_PREFIX = [ 'label', 'description', 'notes', + 'data_class', 'metadata', 'position', 'entry_points', @@ -35,7 +36,7 @@ const NODE_KEY_PREFIX = [ * Order matters for deterministic output. 
*/ const NODE_TYPE_FIELDS: Record<string, string[]> = { - action: ['rules', 'inputs', 'compensation', 'temporal', 'next', 'error'], + action: ['rules', 'expressions', 'inputs', 'compensation', 'temporal', 'next', 'error'], switch: ['rules', 'cases', 'default'], parallel: ['branches', 'join', 'join_strategy'], wait: ['event', 'event_type', 'event_type_import', 'timeout', 'next', 'timeout_next'], @@ -70,7 +71,7 @@ export function serialize(doc: FlowprintDocument): string { if (key === 'nodes') { rootMap.add(new Pair(key, serializeNodes(doc.nodes))) } else if (key === 'lanes') { - rootMap.add(new Pair(key, serializeOrderedMap(doc.lanes))) + rootMap.add(new Pair(key, serializeLanes(doc.lanes))) } else if (key === 'metadata' && typeof value === 'object') { rootMap.add(new Pair(key, serializeOrderedMap(value as Record<string, unknown>))) } else if (key === 'workflow' && typeof value === 'object') { @@ -111,6 +112,42 @@ function serializeNodes(nodes: Record<string, Node>): YAMLMap { return nodesMap } +/** + * Lane key order for deterministic output. + */ +const LANE_KEY_ORDER = ['label', 'visibility', 'order', 'data_class', 'height'] as const + +/** + * Serialize the lanes map with deterministic key ordering per lane. + */ +function serializeLanes(lanes: Record<string, Lane>): YAMLMap { + const lanesMap = new YAMLMap() + + for (const [laneId, lane] of Object.entries(lanes)) { + const laneMap = new YAMLMap() + const laneObj = lane as unknown as Record<string, unknown> + + for (const key of LANE_KEY_ORDER) { + const value = laneObj[key] + if (value === undefined) continue + + if (key === 'data_class' && Array.isArray(value)) { + const seq = new YAMLSeq() + for (const item of value as string[]) { + seq.add(createScalar(item)) + } + laneMap.add(new Pair(key, seq)) + } else { + laneMap.add(new Pair(key, createScalar(value))) + } + } + + lanesMap.add(new Pair(laneId, laneMap)) + } + + return lanesMap +} + /** * Serialize a single node with deterministic key ordering.
*/ @@ -124,8 +161,16 @@ function serializeNode(node: Node): YAMLMap { const value = nodeObj[key] if (value === undefined) continue - if (key === 'rules' && typeof value === 'object' && value !== null) { + if (key === 'data_class' && Array.isArray(value)) { + const seq = new YAMLSeq() + for (const item of value as string[]) { + seq.add(createScalar(item)) + } + nodeMap.add(new Pair(key, seq)) + } else if (key === 'rules' && typeof value === 'object' && value !== null) { nodeMap.add(new Pair(key, serializeRulesRef(value as Record<string, unknown>))) + } else if (key === 'expressions' && typeof value === 'object' && value !== null) { + nodeMap.add(new Pair(key, serializeOrderedMap(value as Record<string, string>))) + } else if (key === 'entry_points' && Array.isArray(value)) { nodeMap.add(new Pair(key, serializeEntryPoints(value as Record<string, unknown>[]))) } else if (key === 'cases' && Array.isArray(value)) { diff --git a/packages/schema/src/structural.ts b/packages/schema/src/structural.ts index 3e489a3..e77dc3b 100644 --- a/packages/schema/src/structural.ts +++ b/packages/schema/src/structural.ts @@ -1,11 +1,19 @@ import type { ValidationError } from './types.js' +/** + * Node IDs that conflict with expression sandbox globals. + * Using these as node IDs would shadow built-in variables in the + * expression evaluator, leading to subtle bugs or security issues. + */ +const RESERVED_NODE_IDS = new Set(['input', 'state', 'Math', 'node', 'output']) + /** * Perform structural validation on a schema-valid Flowprint document. * Checks for: - * 1. Dangling node references (next, cases[].next, branches[], join, error.catch, default, timeout_next) - * 2. Invalid lane references (node.lane must exist in lanes) - * 3. Orphan nodes (non-terminals: no incoming AND no outgoing; terminals: no incoming) + * 1. Reserved node IDs (conflict with expression sandbox globals) + * 2. Dangling node references (next, cases[].next, branches[], join, error.catch, default, timeout_next) + * 3. 
Invalid lane references (node.lane must exist in lanes) + * 4. Orphan nodes (non-terminals: no incoming AND no outgoing; terminals: no incoming) * * This function assumes the document has already passed schema validation. */ @@ -19,6 +27,17 @@ export function validateStructure(doc: Record<string, unknown>): ValidationError return errors } + // Check for reserved node IDs + for (const nodeId of Object.keys(nodes)) { + if (RESERVED_NODE_IDS.has(nodeId)) { + errors.push({ + path: `/nodes/${nodeId}`, + message: `Node ID "${nodeId}" is reserved (conflicts with expression sandbox globals). Reserved IDs: ${[...RESERVED_NODE_IDS].join(', ')}`, + severity: 'error', + }) + } + } + const laneIds = new Set(Object.keys(lanes)) const nodeIds = new Set(Object.keys(nodes)) @@ -41,13 +60,34 @@ export function validateStructure(doc: Record<string, unknown>): ValidationError const type = node.type as string - // Mutual exclusion: rules vs entry_points/cases - if (type === 'action' && node.rules !== undefined && node.entry_points !== undefined) { - errors.push({ - path: `/nodes/${nodeId}`, - message: 'Action node cannot have both "rules" and "entry_points". Use one or the other', - severity: 'error', - }) + // Mutual exclusion: expressions vs rules vs entry_points on action nodes + if (type === 'action') { + const hasRules = node.rules !== undefined + const hasEntryPoints = node.entry_points !== undefined + const hasExpressions = node.expressions !== undefined + + if (hasExpressions && hasRules) { + errors.push({ + path: `/nodes/${nodeId}`, + message: 'Action node cannot have both "expressions" and "rules". Use one or the other', + severity: 'error', + }) + } + if (hasExpressions && hasEntryPoints) { + errors.push({ + path: `/nodes/${nodeId}`, + message: + 'Action node cannot have both "expressions" and "entry_points". Use one or the other', + severity: 'error', + }) + } + if (hasRules && hasEntryPoints) { + errors.push({ + path: `/nodes/${nodeId}`, + message: 'Action node cannot have both "rules" and "entry_points". 
Use one or the other', + severity: 'error', + }) + } } if (type === 'switch') { const hasCases = node.cases !== undefined diff --git a/packages/schema/src/types.generated.ts b/packages/schema/src/types.generated.ts index e113056..7a8f6d8 100644 --- a/packages/schema/src/types.generated.ts +++ b/packages/schema/src/types.generated.ts @@ -17,6 +17,10 @@ export type TriggerNode = { * Markdown notes for documentation and design rationale */ notes?: string; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; metadata?: { [k: string]: string; }; @@ -135,6 +139,10 @@ export interface Lane { * Display order (0 = topmost lane) */ order: number; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; /** * Optional custom lane height in pixels (minimum 140) */ @@ -149,11 +157,21 @@ export interface ActionNode { * Markdown notes for documentation and design rationale */ notes?: string; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; metadata?: { [k: string]: string; }; entry_points?: EntryPoint[]; rules?: RulesRef; + /** + * Key-value pairs of output field name to expression string. Makes this an engine-native transform node. 
+ */ + expressions?: { + [k: string]: string; + }; position?: Position; /** * Named input mapping: parameter name to expression @@ -270,6 +288,10 @@ export interface SwitchNode { * Markdown notes for documentation and design rationale */ notes?: string; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; metadata?: { [k: string]: string; }; @@ -291,6 +313,10 @@ export interface ParallelNode { * Markdown notes for documentation and design rationale */ notes?: string; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; metadata?: { [k: string]: string; }; @@ -309,6 +335,10 @@ export interface WaitNode { * Markdown notes for documentation and design rationale */ notes?: string; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; metadata?: { [k: string]: string; }; @@ -345,6 +375,10 @@ export interface ErrorNode { * Markdown notes for documentation and design rationale */ notes?: string; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; metadata?: { [k: string]: string; }; @@ -360,6 +394,10 @@ export interface TerminalNode { * Markdown notes for documentation and design rationale */ notes?: string; + /** + * Data classification labels for trace redaction policy + */ + data_class?: ("pii" | "financial" | "credentials" | "internal")[]; metadata?: { [k: string]: string; };