From f12f0ef078646478e91e3b2e5679be0269e3dc9e Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Wed, 10 Sep 2025 11:30:10 +0300 Subject: [PATCH 01/35] Refactor event client to remove adapter abstraction Removed the adapter abstraction and related files for event handling. The event client now directly manages in-memory (socket.io) and Kafka backends, simplifying initialization, publishing, and subscription logic. Updated type definitions and streamlined the API. Also improved metrics tracking in the server event implementation. Updated package.json to reflect new entry points and removed obsolete scripts. --- client/Event.d.ts | 23 ++- client/Event.js | 181 +++++++++++------ client/Event.ts | 163 ++++++++++++--- client/adapters/Adapter.d.ts | 7 - client/adapters/Adapter.js | 2 - client/adapters/Adapter.ts | 10 - client/adapters/InMemoryAdapter.d.ts | 9 - client/adapters/InMemoryAdapter.js | 128 ------------ client/adapters/InMemoryAdapter.ts | 65 ------ client/adapters/KafkaAdapter.d.ts | 12 -- client/adapters/KafkaAdapter.js | 285 --------------------------- client/adapters/KafkaAdapter.ts | 162 --------------- client/adapters/types.d.ts | 17 -- client/adapters/types.js | 2 - client/adapters/types.ts | 24 --- package.json | 15 +- src/Event.ts | 13 +- 17 files changed, 284 insertions(+), 834 deletions(-) delete mode 100644 client/adapters/Adapter.d.ts delete mode 100644 client/adapters/Adapter.js delete mode 100644 client/adapters/Adapter.ts delete mode 100644 client/adapters/InMemoryAdapter.d.ts delete mode 100644 client/adapters/InMemoryAdapter.js delete mode 100644 client/adapters/InMemoryAdapter.ts delete mode 100644 client/adapters/KafkaAdapter.d.ts delete mode 100644 client/adapters/KafkaAdapter.js delete mode 100644 client/adapters/KafkaAdapter.ts delete mode 100644 client/adapters/types.d.ts delete mode 100644 client/adapters/types.js delete mode 100644 client/adapters/types.ts diff --git a/client/Event.d.ts b/client/Event.d.ts index cace05c..97b69a7 100644 --- a/client/Event.d.ts +++ b/client/Event.d.ts @@ -1,8 +1,23 @@ -import { Callback, InitOptions } from "./adapters/types"; +interface BaseInitOptions { + type: "inMemory" | "socket" | "kafka"; +} +interface InMemoryOptions extends BaseInitOptions { + type: "inMemory"; + host: string; + port?: number; + protocol: string; +} +interface KafkaOptions extends BaseInitOptions { + type: "kafka"; + clientId: string; + brokers: string[]; + groupId: string; +} +type InitOptions = InMemoryOptions | KafkaOptions; +type Callback = (payload: T) => void; declare const event: { - init(options: InitOptions): Promise; + init(options: InitOptions): void; publish(...args: [...string[], T]): Promise; subscribe(type: string, callback: Callback): Promise<() => void>; - cleanup(): Promise; }; -export { event }; +export { event }; \ No newline at end of file diff --git a/client/Event.js b/client/Event.js index 286545b..b1dec8a 100644 --- a/client/Event.js +++ b/client/Event.js @@ -37,24 +37,51 @@ var __generator = (this && this.__generator) || function (thisArg, body) { }; Object.defineProperty(exports, "__esModule", { value: true }); exports.event = void 0; -var InMemoryAdapter_1 = require("./adapters/InMemoryAdapter"); -var KafkaAdapter_1 = require("./adapters/KafkaAdapter"); +var socket_io_client_1 = require("socket.io-client"); +var kafkajs_1 = require("kafkajs"); +var socket = null; +var kafka = null; +var kafkaGroupId = null; +var callbacks = {}; var event = { init: function (options) { - return __awaiter(this, void 0, void 0, function () 
{ - var adapter; - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - adapter = options.type === "inMemory" ? new InMemoryAdapter_1.InMemoryAdapter() : new KafkaAdapter_1.KafkaAdapter(); - this._adapter = adapter; - return [4 /*yield*/, adapter.init(options)]; - case 1: - _a.sent(); - return [2 /*return*/]; + switch (options.type) { + case "inMemory": + if (!options.host) { + throw new Error("host is required for inMemory initialization"); } - }); - }); + if (!options.protocol) { + throw new Error("protocol is required for inMemory initialization"); + } + var host = options.host, protocol = options.protocol; + var socketPath = (options === null || options === void 0 ? void 0 : options.port) + ? "".concat(protocol, "://").concat(host, ":").concat(options.port) + : "".concat(protocol, "://").concat(host); + socket = (0, socket_io_client_1.io)(socketPath); + socket.on("event", function (_a) { + var type = _a.type, payload = _a.payload; + if (callbacks[type]) { + callbacks[type].forEach(function (cb) { return cb(payload); }); + } + }); + break; + case "kafka": + if (!options.clientId) { + throw new Error("clientId is required for Kafka initialization"); + } + if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { + throw new Error("brokers array is required for Kafka initialization"); + } + if (!options.groupId) { + throw new Error("groupId is required for Kafka initialization"); + } + kafka = new kafkajs_1.Kafka({ + clientId: options.clientId, + brokers: options.brokers, + }); + kafkaGroupId = options.groupId; + break; + } }, publish: function () { var args = []; @@ -62,74 +89,98 @@ var event = { args[_i] = arguments[_i]; } return __awaiter(this, void 0, void 0, function () { - var adapter; + var payload, types, producer_1; return __generator(this, function (_a) { switch (_a.label) { case 0: - adapter = this._adapter; - if (!adapter) - throw new Error("Event not initialized"); - return [4 /*yield*/, adapter.publish.apply(adapter, args)]; + if (args.length < 2) { + throw new Error("publish requires at least one event type and a payload"); + } + payload = args[args.length - 1]; + types = args.slice(0, -1); + if (!socket) return [3 /*break*/, 1]; + types.forEach(function (type) { + socket.emit("publish", { type: type, payload: payload }); + }); + return [3 /*break*/, 4]; case 1: + if (!kafka) return [3 /*break*/, 4]; + producer_1 = kafka.producer(); + return [4 /*yield*/, producer_1.connect()]; + case 2: + _a.sent(); + types.forEach(function (type) { + producer_1.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); + }); + return [4 /*yield*/, producer_1.disconnect()]; + case 3: _a.sent(); - return [2 /*return*/]; + _a.label = 4; + case 4: return [2 /*return*/]; } }); }); }, subscribe: function (type, callback) { return __awaiter(this, void 0, void 0, function () { - var adapter; - return __generator(this, function (_a) { - adapter = this._adapter; - if (!adapter) - throw new Error("Event not initialized"); - return [2 /*return*/, adapter.subscribe(type, callback)]; - }); - }); - }, - cleanup: function () { - return __awaiter(this, void 0, void 0, function () { - var adapter; + var consumer; + var _this = this; return __generator(this, function (_a) { switch (_a.label) { case 0: - adapter = this._adapter; - if (!adapter) - return [2 /*return*/]; - return [4 /*yield*/, adapter.cleanup()]; + if (!callbacks[type]) + callbacks[type] = new Set(); + callbacks[type].add(callback); + if (!socket) return [3 /*break*/, 
1]; + socket.emit("subscribe", type); + return [3 /*break*/, 4]; case 1: + if (!kafka) return [3 /*break*/, 4]; + consumer = kafka.consumer({ groupId: kafkaGroupId }); + return [4 /*yield*/, consumer.connect()]; + case 2: + _a.sent(); + return [4 /*yield*/, consumer.subscribe({ topic: type, fromBeginning: true })]; + case 3: _a.sent(); - return [2 /*return*/]; + consumer.run({ + eachMessage: function (_a) { return __awaiter(_this, [_a], void 0, function (_b) { + var payload_1; + var _c; + var topic = _b.topic, partition = _b.partition, message = _b.message; + return __generator(this, function (_d) { + if (callbacks[topic]) { + try { + payload_1 = JSON.parse(((_c = message.value) === null || _c === void 0 ? void 0 : _c.toString()) || "{}"); + callbacks[topic].forEach(function (cb) { return cb(payload_1); }); + } + catch (error) { + console.error("Failed to parse message from topic ".concat(topic, ":"), error); + } + } + return [2 /*return*/]; + }); + }); }, + }); + _a.label = 4; + case 4: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { + return __generator(this, function (_a) { + callbacks[type].delete(callback); + if (callbacks[type].size === 0) { + delete callbacks[type]; + if (socket) { + socket.emit("unsubscribe", type); + } + } + return [2 /*return*/]; + }); + }); }]; } }); }); }, }; -exports.event = event; -process.on("SIGINT", function () { return __awaiter(void 0, void 0, void 0, function () { - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - console.log("Shutting down gracefully..."); - return [4 /*yield*/, event.cleanup()]; - case 1: - _a.sent(); - process.exit(0); - return [2 /*return*/]; - } - }); -}); }); -process.on("SIGTERM", function () { return __awaiter(void 0, void 0, void 0, function () { - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - console.log("Shutting down gracefully..."); - return [4 /*yield*/, event.cleanup()]; - case 1: - _a.sent(); - process.exit(0); - return [2 /*return*/]; - } - }); -}); }); +exports.event = event; \ No newline at end of file diff --git a/client/Event.ts b/client/Event.ts index 569c82b..ec5d5e9 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -1,49 +1,148 @@ -import { Callback, InitOptions } from "./adapters/types"; +import { Socket, io } from "socket.io-client"; -import { EventAdapter } from "./adapters/Adapter"; -import { InMemoryAdapter } from "./adapters/InMemoryAdapter"; -import { KafkaAdapter } from "./adapters/KafkaAdapter"; +import { Kafka } from "kafkajs"; + +let socket: Socket | null = null; +let kafka: Kafka | null = null; +let kafkaGroupId: string | null = null; + +const callbacks: Record> = {}; + +interface BaseInitOptions { + type: "inMemory" | "socket" | "kafka"; +} + +interface InMemoryOptions extends BaseInitOptions { + type: "inMemory"; + host: string; + port?: number; + protocol: string; +} + +interface KafkaOptions extends BaseInitOptions { + type: "kafka"; + clientId: string; + brokers: string[]; + groupId: string; +} + +type InitOptions = InMemoryOptions | KafkaOptions; + +type Callback = (payload: T) => void; const event = { - async init(options: InitOptions) { - const adapter: EventAdapter = - options.type === "inMemory" ? 
new InMemoryAdapter() : new KafkaAdapter(); - (this as any)._adapter = adapter; - await adapter.init(options); + init(options: InitOptions) { + switch (options.type) { + case "inMemory": + if (!options.host) { + throw new Error("host is required for inMemory initialization"); + } + if (!options.protocol) { + throw new Error("protocol is required for inMemory initialization"); + } + + const { host, protocol } = options; + + const socketPath = options?.port + ? `${protocol}://${host}:${options.port}` + : `${protocol}://${host}`; + + socket = io(socketPath); + socket.on( + "event", + ({ type, payload }: { type: string; payload: any }) => { + if (callbacks[type]) { + callbacks[type].forEach((cb) => cb(payload)); + } + } + ); + break; + case "kafka": + if (!options.clientId) { + throw new Error("clientId is required for Kafka initialization"); + } + if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { + throw new Error("brokers array is required for Kafka initialization"); + } + if (!options.groupId) { + throw new Error("groupId is required for Kafka initialization"); + } + + kafka = new Kafka({ + clientId: options.clientId, + brokers: options.brokers, + }); + kafkaGroupId = options.groupId; + break; + } }, async publish(...args: [...string[], T]): Promise { - const adapter: EventAdapter | undefined = (this as any)._adapter; - if (!adapter) throw new Error("Event not initialized"); - await adapter.publish(...args); + if (args.length < 2) { + throw new Error("publish requires at least one event type and a payload"); + } + + const payload = args[args.length - 1]; + const types = args.slice(0, -1) as string[]; + + if (socket) { + types.forEach((type) => { + socket!.emit("publish", { type, payload }); + }); + } else if (kafka) { + const producer = kafka!.producer(); + await producer.connect(); + + types.forEach((type) => { + producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); + }); + + await producer.disconnect(); + } }, async subscribe( type: string, callback: Callback ): Promise<() => void> { - const adapter: EventAdapter | undefined = (this as any)._adapter; - if (!adapter) throw new Error("Event not initialized"); - return adapter.subscribe(type, callback as any); - }, + if (!callbacks[type]) callbacks[type] = new Set(); - async cleanup() { - const adapter: EventAdapter | undefined = (this as any)._adapter; - if (!adapter) return; - await adapter.cleanup(); + callbacks[type].add(callback as Callback); + + if (socket) { + socket!.emit("subscribe", type); + } else if (kafka) { + const consumer = kafka!.consumer({ groupId: kafkaGroupId! 
}); + await consumer.connect(); + await consumer.subscribe({ topic: type, fromBeginning: true }); + + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + if (callbacks[topic]) { + try { + const payload = JSON.parse(message.value?.toString() || "{}"); + callbacks[topic].forEach((cb) => cb(payload)); + } catch (error) { + console.error(`Failed to parse message from topic ${topic}:`, error); + } + } + }, + }); + } + + return async () => { + callbacks[type].delete(callback as Callback); + if (callbacks[type].size === 0) { + delete callbacks[type]; + if (socket) { + socket.emit("unsubscribe", type); + } + } + }; }, }; -process.on("SIGINT", async () => { - console.log("Shutting down gracefully..."); - await event.cleanup(); - process.exit(0); -}); - -process.on("SIGTERM", async () => { - console.log("Shutting down gracefully..."); - await event.cleanup(); - process.exit(0); -}); - export { event }; diff --git a/client/adapters/Adapter.d.ts b/client/adapters/Adapter.d.ts deleted file mode 100644 index 3abf3dd..0000000 --- a/client/adapters/Adapter.d.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { Callback, InitOptions } from "./types"; -export interface EventAdapter { - init(options: InitOptions): Promise; - publish(...args: [...string[], T]): Promise; - subscribe(type: string, callback: Callback): Promise<() => void>; - cleanup(): Promise; -} diff --git a/client/adapters/Adapter.js b/client/adapters/Adapter.js deleted file mode 100644 index c8ad2e5..0000000 --- a/client/adapters/Adapter.js +++ /dev/null @@ -1,2 +0,0 @@ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); diff --git a/client/adapters/Adapter.ts b/client/adapters/Adapter.ts deleted file mode 100644 index e4bf098..0000000 --- a/client/adapters/Adapter.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { Callback, InitOptions } from "./types"; - -export interface EventAdapter { - init(options: InitOptions): Promise; - publish(...args: [...string[], T]): Promise; - subscribe(type: string, callback: Callback): Promise<() => void>; - cleanup(): Promise; -} - - diff --git a/client/adapters/InMemoryAdapter.d.ts b/client/adapters/InMemoryAdapter.d.ts deleted file mode 100644 index 5cadd1d..0000000 --- a/client/adapters/InMemoryAdapter.d.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { Callback, InitOptions } from "./types"; -import { EventAdapter } from "./Adapter"; -export declare class InMemoryAdapter implements EventAdapter { - private socket; - init(options: InitOptions): Promise; - publish(...args: [...string[], T]): Promise; - subscribe(type: string, callback: Callback): Promise<() => void>; - cleanup(): Promise; -} diff --git a/client/adapters/InMemoryAdapter.js b/client/adapters/InMemoryAdapter.js deleted file mode 100644 index b54f942..0000000 --- a/client/adapters/InMemoryAdapter.js +++ /dev/null @@ -1,128 +0,0 @@ -"use strict"; -var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { - function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -}; -var __generator = (this && this.__generator) || function (thisArg, body) { - var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype); - return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; - function verb(n) { return function (v) { return step([n, v]); }; } - function step(op) { - if (f) throw new TypeError("Generator is already executing."); - while (g && (g = 0, op[0] && (_ = 0)), _) try { - if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; - if (y = 0, t) op = [op[0] & 2, t.value]; - switch (op[0]) { - case 0: case 1: t = op; break; - case 4: _.label++; return { value: op[1], done: false }; - case 5: _.label++; y = op[1]; op = [0]; continue; - case 7: op = _.ops.pop(); _.trys.pop(); continue; - default: - if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } - if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } - if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } - if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } - if (t[2]) _.ops.pop(); - _.trys.pop(); continue; - } - op = body.call(thisArg, _); - } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } - if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; - } -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.InMemoryAdapter = void 0; -var socket_io_client_1 = require("socket.io-client"); -var callbacks = {}; -var InMemoryAdapter = /** @class */ (function () { - function InMemoryAdapter() { - this.socket = null; - } - InMemoryAdapter.prototype.init = function (options) { - return __awaiter(this, void 0, void 0, function () { - var opts, host, protocol, socketPath; - return __generator(this, function (_a) { - opts = options; - if (!opts.host) { - throw new Error("host is required for inMemory initialization"); - } - if (!opts.protocol) { - throw new Error("protocol is required for inMemory initialization"); - } - host = opts.host, protocol = opts.protocol; - socketPath = (opts === null || opts === void 0 ? void 0 : opts.port) ? 
"".concat(protocol, "://").concat(host, ":").concat(opts.port) : "".concat(protocol, "://").concat(host); - this.socket = (0, socket_io_client_1.io)(socketPath); - this.socket.on("event", function (_a) { - var type = _a.type, payload = _a.payload; - if (callbacks[type]) { - callbacks[type].forEach(function (cb) { return cb(payload); }); - } - }); - return [2 /*return*/]; - }); - }); - }; - InMemoryAdapter.prototype.publish = function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return __awaiter(this, void 0, void 0, function () { - var payload, types; - var _this = this; - return __generator(this, function (_a) { - if (!this.socket) - return [2 /*return*/]; - payload = args[args.length - 1]; - types = args.slice(0, -1); - types.forEach(function (type) { - _this.socket.emit("publish", { type: type, payload: payload }); - }); - return [2 /*return*/]; - }); - }); - }; - InMemoryAdapter.prototype.subscribe = function (type, callback) { - return __awaiter(this, void 0, void 0, function () { - var _this = this; - return __generator(this, function (_a) { - if (!callbacks[type]) - callbacks[type] = new Set(); - callbacks[type].add(callback); - if (this.socket) { - this.socket.emit("subscribe", type); - } - return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { - return __generator(this, function (_a) { - callbacks[type].delete(callback); - if (callbacks[type].size === 0) { - delete callbacks[type]; - if (this.socket) { - this.socket.emit("unsubscribe", type); - } - } - return [2 /*return*/]; - }); - }); }]; - }); - }); - }; - InMemoryAdapter.prototype.cleanup = function () { - return __awaiter(this, void 0, void 0, function () { - return __generator(this, function (_a) { - if (this.socket) { - this.socket.disconnect(); - this.socket = null; - } - return [2 /*return*/]; - }); - }); - }; - return InMemoryAdapter; -}()); -exports.InMemoryAdapter = InMemoryAdapter; diff --git a/client/adapters/InMemoryAdapter.ts b/client/adapters/InMemoryAdapter.ts deleted file mode 100644 index 01c2ccd..0000000 --- a/client/adapters/InMemoryAdapter.ts +++ /dev/null @@ -1,65 +0,0 @@ -import { Callback, InMemoryOptions, InitOptions } from "./types"; -import { Socket, io } from "socket.io-client"; - -import { EventAdapter } from "./Adapter"; - -const callbacks: Record> = {}; - -export class InMemoryAdapter implements EventAdapter { - private socket: Socket | null = null; - - async init(options: InitOptions): Promise { - const opts = options as InMemoryOptions; - if (!opts.host) { - throw new Error("host is required for inMemory initialization"); - } - if (!opts.protocol) { - throw new Error("protocol is required for inMemory initialization"); - } - - const { host, protocol } = opts; - const socketPath = opts?.port ? 
`${protocol}://${host}:${opts.port}` : `${protocol}://${host}`; - - this.socket = io(socketPath); - this.socket.on("event", ({ type, payload }: { type: string; payload: any }) => { - if (callbacks[type]) { - callbacks[type].forEach((cb) => cb(payload)); - } - }); - } - - async publish(...args: [...string[], T]): Promise { - if (!this.socket) return; - const payload = args[args.length - 1]; - const types = args.slice(0, -1) as string[]; - types.forEach((type) => { - this.socket!.emit("publish", { type, payload }); - }); - } - - async subscribe(type: string, callback: Callback): Promise<() => void> { - if (!callbacks[type]) callbacks[type] = new Set(); - callbacks[type].add(callback as Callback); - if (this.socket) { - this.socket.emit("subscribe", type); - } - return async () => { - callbacks[type].delete(callback as Callback); - if (callbacks[type].size === 0) { - delete callbacks[type]; - if (this.socket) { - this.socket.emit("unsubscribe", type); - } - } - }; - } - - async cleanup(): Promise { - if (this.socket) { - this.socket.disconnect(); - this.socket = null; - } - } -} - - diff --git a/client/adapters/KafkaAdapter.d.ts b/client/adapters/KafkaAdapter.d.ts deleted file mode 100644 index 43b86b4..0000000 --- a/client/adapters/KafkaAdapter.d.ts +++ /dev/null @@ -1,12 +0,0 @@ -import { Callback, InitOptions } from "./types"; -import { EventAdapter } from "./Adapter"; -export declare class KafkaAdapter implements EventAdapter { - private kafka; - private kafkaProducer; - private kafkaConsumers; - private kafkaGroupId; - init(options: InitOptions): Promise; - publish(...args: [...string[], T]): Promise; - subscribe(type: string, callback: Callback): Promise<() => void>; - cleanup(): Promise; -} diff --git a/client/adapters/KafkaAdapter.js b/client/adapters/KafkaAdapter.js deleted file mode 100644 index e5ea3bd..0000000 --- a/client/adapters/KafkaAdapter.js +++ /dev/null @@ -1,285 +0,0 @@ -"use strict"; -var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { - function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -}; -var __generator = (this && this.__generator) || function (thisArg, body) { - var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype); - return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; - function verb(n) { return function (v) { return step([n, v]); }; } - function step(op) { - if (f) throw new TypeError("Generator is already executing."); - while (g && (g = 0, op[0] && (_ = 0)), _) try { - if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? 
y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; - if (y = 0, t) op = [op[0] & 2, t.value]; - switch (op[0]) { - case 0: case 1: t = op; break; - case 4: _.label++; return { value: op[1], done: false }; - case 5: _.label++; y = op[1]; op = [0]; continue; - case 7: op = _.ops.pop(); _.trys.pop(); continue; - default: - if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } - if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } - if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } - if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } - if (t[2]) _.ops.pop(); - _.trys.pop(); continue; - } - op = body.call(thisArg, _); - } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } - if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; - } -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.KafkaAdapter = void 0; -var kafkajs_1 = require("kafkajs"); -var callbacks = {}; -var KafkaAdapter = /** @class */ (function () { - function KafkaAdapter() { - this.kafka = null; - this.kafkaProducer = null; - this.kafkaConsumers = new Map(); - this.kafkaGroupId = null; - } - KafkaAdapter.prototype.init = function (options) { - return __awaiter(this, void 0, void 0, function () { - var opts; - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - opts = options; - if (!opts.clientId) { - throw new Error("clientId is required for Kafka initialization"); - } - if (!opts.brokers || !Array.isArray(opts.brokers) || opts.brokers.length === 0) { - throw new Error("brokers array is required for Kafka initialization"); - } - if (!opts.groupId) { - throw new Error("groupId is required for Kafka initialization"); - } - this.kafka = new kafkajs_1.Kafka({ - clientId: opts.clientId, - brokers: opts.brokers, - retry: { - initialRetryTime: 100, - retries: 8, - multiplier: 2, - maxRetryTime: 30000, - }, - connectionTimeout: 10000, - requestTimeout: 30000, - }); - this.kafkaGroupId = opts.groupId; - this.kafkaProducer = this.kafka.producer({ - allowAutoTopicCreation: true, - transactionTimeout: 30000, - }); - return [4 /*yield*/, this.kafkaProducer.connect()]; - case 1: - _a.sent(); - this.kafkaProducer.on("producer.disconnect", function () { - console.error("Producer disconnected"); - }); - return [2 /*return*/]; - } - }); - }); - }; - KafkaAdapter.prototype.publish = function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return __awaiter(this, void 0, void 0, function () { - var payload, types, messages, error_1, reconnectError_1; - var _this = this; - var _a; - return __generator(this, function (_b) { - switch (_b.label) { - case 0: - if (!this.kafka || !this.kafkaProducer) - return [2 /*return*/]; - payload = args[args.length - 1]; - types = args.slice(0, -1); - _b.label = 1; - case 1: - _b.trys.push([1, 3, , 11]); - messages = types.map(function (type) { return ({ - topic: type, - messages: [ - { - value: JSON.stringify(payload), - timestamp: Date.now().toString(), - }, - ], - }); }); - return [4 /*yield*/, Promise.all(messages.map(function (msg) { return _this.kafkaProducer.send(msg); }))]; - case 2: - _b.sent(); - return [3 /*break*/, 11]; - case 3: - error_1 = _b.sent(); - console.error("Failed to publish to Kafka:", error_1); - if (!((_a = error_1.message) === null || _a === void 0 ? 
void 0 : _a.includes("disconnected"))) return [3 /*break*/, 9]; - _b.label = 4; - case 4: - _b.trys.push([4, 7, , 8]); - return [4 /*yield*/, this.kafkaProducer.connect()]; - case 5: - _b.sent(); - return [4 /*yield*/, this.publish.apply(this, args)]; - case 6: - _b.sent(); - return [3 /*break*/, 8]; - case 7: - reconnectError_1 = _b.sent(); - throw new Error("Failed to reconnect producer: ".concat(reconnectError_1.message)); - case 8: return [3 /*break*/, 10]; - case 9: throw error_1; - case 10: return [3 /*break*/, 11]; - case 11: return [2 /*return*/]; - } - }); - }); - }; - KafkaAdapter.prototype.subscribe = function (type, callback) { - return __awaiter(this, void 0, void 0, function () { - var consumer; - var _this = this; - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - if (!callbacks[type]) - callbacks[type] = new Set(); - callbacks[type].add(callback); - if (!this.kafka) { - return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { - return __generator(this, function (_a) { - callbacks[type].delete(callback); - if (callbacks[type].size === 0) - delete callbacks[type]; - return [2 /*return*/]; - }); - }); }]; - } - if (!!this.kafkaConsumers.has(type)) return [3 /*break*/, 4]; - consumer = this.kafka.consumer({ - groupId: "".concat(this.kafkaGroupId, "-").concat(type), - sessionTimeout: 30000, - heartbeatInterval: 3000, - }); - return [4 /*yield*/, consumer.connect()]; - case 1: - _a.sent(); - return [4 /*yield*/, consumer.subscribe({ topic: type, fromBeginning: false })]; - case 2: - _a.sent(); - return [4 /*yield*/, consumer.run({ - autoCommit: true, - eachMessage: function (_a) { return __awaiter(_this, [_a], void 0, function (_b) { - var payload_1; - var _c; - var topic = _b.topic, partition = _b.partition, message = _b.message; - return __generator(this, function (_d) { - if (callbacks[topic]) { - try { - payload_1 = JSON.parse(((_c = message.value) === null || _c === void 0 ? 
void 0 : _c.toString()) || "{}"); - callbacks[topic].forEach(function (cb) { return cb(payload_1); }); - } - catch (error) { - console.error("Failed to parse message from topic ".concat(topic, ":"), error); - } - } - return [2 /*return*/]; - }); - }); }, - })]; - case 3: - _a.sent(); - consumer.on("consumer.disconnect", function () { - console.error("Consumer for topic ".concat(type, " disconnected")); - _this.kafkaConsumers.delete(type); - }); - this.kafkaConsumers.set(type, consumer); - _a.label = 4; - case 4: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { - var consumer; - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - callbacks[type].delete(callback); - if (!(callbacks[type].size === 0)) return [3 /*break*/, 2]; - delete callbacks[type]; - consumer = this.kafkaConsumers.get(type); - if (!consumer) return [3 /*break*/, 2]; - return [4 /*yield*/, consumer.disconnect()]; - case 1: - _a.sent(); - this.kafkaConsumers.delete(type); - _a.label = 2; - case 2: return [2 /*return*/]; - } - }); - }); }]; - } - }); - }); - }; - KafkaAdapter.prototype.cleanup = function () { - return __awaiter(this, void 0, void 0, function () { - var entries, _i, entries_1, _a, topic, consumer, error_2, error_3; - return __generator(this, function (_b) { - switch (_b.label) { - case 0: - entries = Array.from(this.kafkaConsumers.entries()); - _i = 0, entries_1 = entries; - _b.label = 1; - case 1: - if (!(_i < entries_1.length)) return [3 /*break*/, 6]; - _a = entries_1[_i], topic = _a[0], consumer = _a[1]; - _b.label = 2; - case 2: - _b.trys.push([2, 4, , 5]); - return [4 /*yield*/, consumer.disconnect()]; - case 3: - _b.sent(); - return [3 /*break*/, 5]; - case 4: - error_2 = _b.sent(); - console.error("Failed to disconnect consumer for ".concat(topic, ":"), error_2); - return [3 /*break*/, 5]; - case 5: - _i++; - return [3 /*break*/, 1]; - case 6: - this.kafkaConsumers.clear(); - if (!this.kafkaProducer) return [3 /*break*/, 11]; - _b.label = 7; - case 7: - _b.trys.push([7, 9, , 10]); - return [4 /*yield*/, this.kafkaProducer.disconnect()]; - case 8: - _b.sent(); - return [3 /*break*/, 10]; - case 9: - error_3 = _b.sent(); - console.error("Failed to disconnect producer:", error_3); - return [3 /*break*/, 10]; - case 10: - this.kafkaProducer = null; - _b.label = 11; - case 11: - this.kafka = null; - return [2 /*return*/]; - } - }); - }); - }; - return KafkaAdapter; -}()); -exports.KafkaAdapter = KafkaAdapter; diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts deleted file mode 100644 index 808d07c..0000000 --- a/client/adapters/KafkaAdapter.ts +++ /dev/null @@ -1,162 +0,0 @@ -import { Callback, InitOptions, KafkaOptions } from "./types"; -import { Consumer, Kafka, Producer } from "kafkajs"; - -import { EventAdapter } from "./Adapter"; - -const callbacks: Record> = {}; - -export class KafkaAdapter implements EventAdapter { - private kafka: Kafka | null = null; - private kafkaProducer: Producer | null = null; - private kafkaConsumers: Map = new Map(); - private kafkaGroupId: string | null = null; - - async init(options: InitOptions): Promise { - const opts = options as KafkaOptions; - if (!opts.clientId) { - throw new Error("clientId is required for Kafka initialization"); - } - if (!opts.brokers || !Array.isArray(opts.brokers) || opts.brokers.length === 0) { - throw new Error("brokers array is required for Kafka initialization"); - } - if (!opts.groupId) { - throw new Error("groupId is required for Kafka 
initialization"); - } - - this.kafka = new Kafka({ - clientId: opts.clientId, - brokers: opts.brokers, - retry: { - initialRetryTime: 100, - retries: 8, - multiplier: 2, - maxRetryTime: 30000, - }, - connectionTimeout: 10000, - requestTimeout: 30000, - }); - - this.kafkaGroupId = opts.groupId; - - this.kafkaProducer = this.kafka.producer({ - allowAutoTopicCreation: true, - transactionTimeout: 30000, - }); - - await this.kafkaProducer.connect(); - this.kafkaProducer.on("producer.disconnect", () => { - console.error("Producer disconnected"); - }); - } - - async publish(...args: [...string[], T]): Promise { - if (!this.kafka || !this.kafkaProducer) return; - const payload = args[args.length - 1]; - const types = args.slice(0, -1) as string[]; - try { - const messages = types.map((type) => ({ - topic: type, - messages: [ - { - value: JSON.stringify(payload), - timestamp: Date.now().toString(), - }, - ], - })); - await Promise.all(messages.map((msg) => this.kafkaProducer!.send(msg))); - } catch (error: any) { - console.error("Failed to publish to Kafka:", error); - if (error.message?.includes("disconnected")) { - try { - await this.kafkaProducer.connect(); - await this.publish(...args); - } catch (reconnectError: any) { - throw new Error(`Failed to reconnect producer: ${reconnectError.message}`); - } - } else { - throw error; - } - } - } - - async subscribe(type: string, callback: Callback): Promise<() => void> { - if (!callbacks[type]) callbacks[type] = new Set(); - callbacks[type].add(callback as Callback); - - if (!this.kafka) { - return async () => { - callbacks[type].delete(callback as Callback); - if (callbacks[type].size === 0) delete callbacks[type]; - }; - } - - if (!this.kafkaConsumers.has(type)) { - const consumer = this.kafka.consumer({ - groupId: `${this.kafkaGroupId}-${type}`, - sessionTimeout: 30000, - heartbeatInterval: 3000, - }); - - await consumer.connect(); - await consumer.subscribe({ topic: type, fromBeginning: false }); - await consumer.run({ - autoCommit: true, - eachMessage: async ({ topic, partition, message }) => { - if (callbacks[topic]) { - try { - const payload = JSON.parse(message.value?.toString() || "{}"); - callbacks[topic].forEach((cb) => cb(payload)); - } catch (error) { - console.error(`Failed to parse message from topic ${topic}:`, error); - } - } - }, - }); - - consumer.on("consumer.disconnect", () => { - console.error(`Consumer for topic ${type} disconnected`); - this.kafkaConsumers.delete(type); - }); - - this.kafkaConsumers.set(type, consumer); - } - - return async () => { - callbacks[type].delete(callback as Callback); - if (callbacks[type].size === 0) { - delete callbacks[type]; - const consumer = this.kafkaConsumers.get(type); - if (consumer) { - await consumer.disconnect(); - this.kafkaConsumers.delete(type); - } - } - }; - } - - async cleanup(): Promise { - const entries = Array.from(this.kafkaConsumers.entries()); - for (const [topic, consumer] of entries) { - try { - await consumer.disconnect(); - } catch (error) { - console.error(`Failed to disconnect consumer for ${topic}:`, error); - } - } - this.kafkaConsumers.clear(); - - if (this.kafkaProducer) { - try { - await this.kafkaProducer.disconnect(); - } catch (error) { - console.error("Failed to disconnect producer:", error); - } - this.kafkaProducer = null; - } - - this.kafka = null; - } -} - - - diff --git a/client/adapters/types.d.ts b/client/adapters/types.d.ts deleted file mode 100644 index 29a057c..0000000 --- a/client/adapters/types.d.ts +++ /dev/null @@ -1,17 +0,0 @@ -export type 
Callback = (payload: T) => void; -export interface BaseInitOptions { - type: "inMemory" | "kafka"; -} -export interface InMemoryOptions extends BaseInitOptions { - type: "inMemory"; - host: string; - port?: number; - protocol: string; -} -export interface KafkaOptions extends BaseInitOptions { - type: "kafka"; - clientId: string; - brokers: string[]; - groupId: string; -} -export type InitOptions = InMemoryOptions | KafkaOptions; diff --git a/client/adapters/types.js b/client/adapters/types.js deleted file mode 100644 index c8ad2e5..0000000 --- a/client/adapters/types.js +++ /dev/null @@ -1,2 +0,0 @@ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); diff --git a/client/adapters/types.ts b/client/adapters/types.ts deleted file mode 100644 index 1911d9e..0000000 --- a/client/adapters/types.ts +++ /dev/null @@ -1,24 +0,0 @@ -export type Callback = (payload: T) => void; - -export interface BaseInitOptions { - type: "inMemory" | "kafka"; -} - -export interface InMemoryOptions extends BaseInitOptions { - type: "inMemory"; - host: string; - port?: number; - protocol: string; -} - -export interface KafkaOptions extends BaseInitOptions { - type: "kafka"; - clientId: string; - brokers: string[]; - groupId: string; -} - -export type InitOptions = InMemoryOptions | KafkaOptions; - - - diff --git a/package.json b/package.json index bb678d9..1635a89 100644 --- a/package.json +++ b/package.json @@ -1,15 +1,14 @@ { - "name": "@nucleoidai/node-event", - "version": "1.1.5", + "name": "node-event-test-package", + "version": "1.1.4", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ "event" ], "scripts": { - "test": "echo 'No tests'", "start": "node sample/publisher/index.js && node sample/subscriber/index.js && npx nuc-node-event-test/server", - "build": "npx tsc client/Event.ts --outDir client --declaration && npx tsc src/Event.ts --outDir src --declaration", + "build": "npx tsc client/Event.ts --outDir client --declaration", "kafka:up": "docker-compose up -d", "kafka:down": "docker-compose down", "kafka:test": "node examples/kafka-example.js" @@ -48,9 +47,9 @@ }, "exports": { ".": { - "import": "./src/Event.js", - "require": "./src/Event.js", - "types": "./src/Event.d.ts" + "import": "./src/Event.ts", + "require": "./src/Event.ts", + "types": "./src/Event.ts" }, "./client": { "import": "./client/Event.js", @@ -63,4 +62,4 @@ "types": "./server/server.d.ts" } } -} +} \ No newline at end of file diff --git a/src/Event.ts b/src/Event.ts index 1d8ed16..f43ae26 100644 --- a/src/Event.ts +++ b/src/Event.ts @@ -1,5 +1,4 @@ -import * as client from "prom-client"; - +import client from "prom-client"; import { v4 as uuid } from "uuid"; const subscriptions = {}; @@ -24,6 +23,7 @@ const eventPublishDuration = new client.Histogram({ buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], }); +// Track payload size for analysis const eventPayloadSize = new client.Histogram({ name: "event_payload_size_bytes", help: "Size of event payloads in bytes", @@ -31,12 +31,14 @@ const eventPayloadSize = new client.Histogram({ buckets: [10, 100, 1000, 10000, 100000, 1000000], }); +// Track error rates const eventPublishErrors = new client.Counter({ name: "event_publish_errors_total", help: "Total number of event publish errors", labelNames: ["event_type", "error_type"], }); +// Track callback processing duration const callbackProcessingDuration = new client.Histogram({ name: "event_callback_duration_seconds", help: "Time taken to process event callbacks", @@ -44,18 +46,21 @@ 
const callbackProcessingDuration = new client.Histogram({ buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], }); +// Track subscription rates const subscriptionRate = new client.Counter({ name: "event_subscriptions_total", help: "Total number of event subscriptions created", labelNames: ["event_type"], }); +// Track unsubscription rates const unsubscriptionRate = new client.Counter({ name: "event_unsubscriptions_total", help: "Total number of event unsubscriptions", labelNames: ["event_type"], }); +// Track throughput (events processed per second) const eventThroughput = new client.Counter({ name: "event_callbacks_processed_total", help: "Total number of event callbacks processed successfully", @@ -114,6 +119,7 @@ const subscribe = (...args) => { console.debug("node-event", "unsubscribe", type, id); delete subscriptions[type][id]; + // Track unsubscription unsubscriptionRate.labels(type).inc(); if (Object.keys(subscriptions[type]).length === 0) { @@ -129,6 +135,7 @@ const subscribe = (...args) => { subscriptions[type][id] = registry; + // Update subscription metrics subscriptionRate.labels(type).inc(); eventSubscriptionGauge .labels(type) @@ -152,9 +159,11 @@ const publish = (...args) => { throw new Error("Invalid publish type"); } + // Track metrics for event publishing const endTimer = eventPublishDuration.labels(type).startTimer(); eventPublishCounter.labels(type).inc(); + // Track payload size const payloadSize = JSON.stringify(payload).length; eventPayloadSize.labels(type).observe(payloadSize); From 865e34a801193720c71ecb5cc26a2e5770a4e6b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Wed, 10 Sep 2025 14:05:48 +0300 Subject: [PATCH 02/35] Add Prometheus metrics to event system Introduced various Prometheus metrics using prom-client to track event publishing, subscriptions, payload sizes, errors, callback durations, and throughput. Metrics are updated throughout the event publishing and subscription lifecycle to improve observability and monitoring. 
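
A minimal scrape sketch (not part of this patch; assumes prom-client >= 13,
where register.metrics() is async, and an arbitrary port 9091):

    import http from "node:http";
    import * as client from "prom-client";

    // The counters and histograms introduced here register on prom-client's
    // default registry, so a bare /metrics endpoint is enough for Prometheus
    // to scrape everything the event API records.
    http
      .createServer(async (req, res) => {
        if (req.url === "/metrics") {
          res.setHeader("Content-Type", client.register.contentType);
          res.end(await client.register.metrics());
        } else {
          res.statusCode = 404;
          res.end();
        }
      })
      .listen(9091);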
--- client/Event.ts | 165 ++++++++++++++++++++++++++++++++++++++++++------ package.json | 8 +-- 2 files changed, 151 insertions(+), 22 deletions(-) diff --git a/client/Event.ts b/client/Event.ts index ec5d5e9..1498287 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -1,7 +1,72 @@ +import * as client from "prom-client"; + import { Socket, io } from "socket.io-client"; import { Kafka } from "kafkajs"; +const eventPublishCounter = new client.Counter({ + name: "events_published_total", + help: "Total number of events published", + labelNames: ["event_type"], +}); + +const eventSubscriptionGauge = new client.Gauge({ + name: "active_event_subscriptions", + help: "Number of active event subscriptions", + labelNames: ["event_type"], +}); + +const eventPublishDuration = new client.Histogram({ + name: "event_publish_duration_seconds", + help: "Time taken to publish events", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], +}); + +// Track payload size for analysis +const eventPayloadSize = new client.Histogram({ + name: "event_payload_size_bytes", + help: "Size of event payloads in bytes", + labelNames: ["event_type"], + buckets: [10, 100, 1000, 10000, 100000, 1000000], +}); + +// Track error rates +const eventPublishErrors = new client.Counter({ + name: "event_publish_errors_total", + help: "Total number of event publish errors", + labelNames: ["event_type", "error_type"], +}); + +// Track callback processing duration +const callbackProcessingDuration = new client.Histogram({ + name: "event_callback_duration_seconds", + help: "Time taken to process event callbacks", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], +}); + +// Track subscription rates +const subscriptionRate = new client.Counter({ + name: "event_subscriptions_total", + help: "Total number of event subscriptions created", + labelNames: ["event_type"], +}); + +// Track unsubscription rates +const unsubscriptionRate = new client.Counter({ + name: "event_unsubscriptions_total", + help: "Total number of event unsubscriptions", + labelNames: ["event_type"], +}); + +// Track throughput (events processed per second) +const eventThroughput = new client.Counter({ + name: "event_callbacks_processed_total", + help: "Total number of event callbacks processed successfully", + labelNames: ["event_type"], +}); + let socket: Socket | null = null; let kafka: Kafka | null = null; let kafkaGroupId: string | null = null; @@ -40,7 +105,7 @@ const event = { if (!options.protocol) { throw new Error("protocol is required for inMemory initialization"); } - + const { host, protocol } = options; const socketPath = options?.port @@ -61,13 +126,17 @@ const event = { if (!options.clientId) { throw new Error("clientId is required for Kafka initialization"); } - if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { + if ( + !options.brokers || + !Array.isArray(options.brokers) || + options.brokers.length === 0 + ) { throw new Error("brokers array is required for Kafka initialization"); } if (!options.groupId) { throw new Error("groupId is required for Kafka initialization"); } - + kafka = new Kafka({ clientId: options.clientId, brokers: options.brokers, @@ -85,22 +154,70 @@ const event = { const payload = args[args.length - 1]; const types = args.slice(0, -1) as string[]; - if (socket) { - types.forEach((type) => { - socket!.emit("publish", { type, payload }); - }); - } else if (kafka) { - const producer = kafka!.producer(); - await producer.connect(); + 
// Process each event type + for (const type of types) { + console.log("node-event", "publish", type, payload); - types.forEach((type) => { - producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - }); - }); + // Validation similar to sync version + if ( + type === "__proto__" || + type === "constructor" || + type === "prototype" + ) { + throw new Error("Invalid publish type"); + } + + // Track metrics for event publishing + const endTimer = eventPublishDuration.labels(type).startTimer(); + eventPublishCounter.labels(type).inc(); + + // Track payload size + const payloadSize = JSON.stringify(payload).length; + eventPayloadSize.labels(type).observe(payloadSize); + + try { + if (socket) { + socket!.emit("publish", { type, payload }); + } else if (kafka) { + const producer = kafka!.producer(); + await producer.connect(); + + await producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); - await producer.disconnect(); + await producer.disconnect(); + } + + // Process local callbacks if they exist + if (callbacks[type]) { + callbacks[type].forEach((callback) => { + setTimeout(() => { + const callbackTimer = callbackProcessingDuration + .labels(type) + .startTimer(); + try { + callback(payload); + eventThroughput.labels(type).inc(); + } catch (err) { + console.error("node-event", "error", type, err); + const errorName = + err instanceof Error ? err.name : "UnknownError"; + eventPublishErrors.labels(type, errorName).inc(); + } finally { + callbackTimer(); + } + }, 0); + }); + } + } catch (err) { + console.error("node-event", "error", type, err); + const errorName = err instanceof Error ? err.name : "UnknownError"; + eventPublishErrors.labels(type, errorName).inc(); + } finally { + endTimer(); + } } }, @@ -112,6 +229,9 @@ const event = { callbacks[type].add(callback as Callback); + subscriptionRate.labels(type).inc(); + eventSubscriptionGauge.labels(type).set(callbacks[type].size); + if (socket) { socket!.emit("subscribe", type); } else if (kafka) { @@ -126,7 +246,10 @@ const event = { const payload = JSON.parse(message.value?.toString() || "{}"); callbacks[topic].forEach((cb) => cb(payload)); } catch (error) { - console.error(`Failed to parse message from topic ${topic}:`, error); + console.error( + `Failed to parse message from topic ${topic}:`, + error + ); } } }, @@ -135,11 +258,17 @@ const event = { return async () => { callbacks[type].delete(callback as Callback); + + unsubscriptionRate.labels(type).inc(); + if (callbacks[type].size === 0) { delete callbacks[type]; + eventSubscriptionGauge.labels(type).set(0); if (socket) { socket.emit("unsubscribe", type); } + } else { + eventSubscriptionGauge.labels(type).set(callbacks[type].size); } }; }, diff --git a/package.json b/package.json index 1635a89..18abb55 100644 --- a/package.json +++ b/package.json @@ -47,9 +47,9 @@ }, "exports": { ".": { - "import": "./src/Event.ts", - "require": "./src/Event.ts", - "types": "./src/Event.ts" + "import": "./src/Event.js", + "require": "./src/Event.js", + "types": "./src/Event.d.ts" }, "./client": { "import": "./client/Event.js", @@ -62,4 +62,4 @@ "types": "./server/server.d.ts" } } -} \ No newline at end of file +} From 81a60290d8efa5d8f1ca7e049528b09fb3f700b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Wed, 10 Sep 2025 14:16:40 +0300 Subject: [PATCH 03/35] Export client from Event.ts Added 'client' to the exports in Event.ts to make it available for import in other modules. 
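
An illustrative consumer of the new export (the import path is an assumption
based on the "./client" entry in package.json; adjust to the installed
package name):

    import { client, event } from "node-event-test-package/client";

    (async () => {
      // publish() bumps the events_published_total counter before the
      // socket/kafka branch runs, so the re-exported registry reflects it
      // even without a transport initialized.
      await event.publish("ORDER_CREATED", { id: 1 });

      const metric = client.register.getSingleMetric("events_published_total");
      console.log(await metric?.get()); // get() is async in prom-client >= 13
    })();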
--- client/Event.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/Event.ts b/client/Event.ts index 1498287..d8dfd15 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -274,4 +274,4 @@ const event = { }, }; -export { event }; +export { event, client }; From 9702efd59d826bbe16167dc63572929628d839c8 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Mon, 15 Sep 2025 18:33:56 +0300 Subject: [PATCH 04/35] Add disconnect support and improve Kafka handling Introduces an async disconnect() method to the event API for both in-memory and Kafka transports, ensuring proper cleanup of resources. Refactors Kafka consumer logic to use a shared consumer and topic tracking, improving subscription management and resource usage. Updates type definitions and bumps package version to 1.1.8. --- client/Event.d.ts | 5 +- client/Event.js | 258 +++++++++++++++++++++++++++++++--------------- client/Event.ts | 216 ++++++++++++++++---------------------- package.json | 2 +- 4 files changed, 267 insertions(+), 214 deletions(-) diff --git a/client/Event.d.ts b/client/Event.d.ts index 97b69a7..06ff9cb 100644 --- a/client/Event.d.ts +++ b/client/Event.d.ts @@ -16,8 +16,9 @@ interface KafkaOptions extends BaseInitOptions { type InitOptions = InMemoryOptions | KafkaOptions; type Callback = (payload: T) => void; declare const event: { - init(options: InitOptions): void; + init(options: InitOptions): Promise; publish(...args: [...string[], T]): Promise; subscribe(type: string, callback: Callback): Promise<() => void>; + disconnect(): Promise; }; -export { event }; \ No newline at end of file +export { event }; diff --git a/client/Event.js b/client/Event.js index b1dec8a..79538c8 100644 --- a/client/Event.js +++ b/client/Event.js @@ -37,51 +37,70 @@ var __generator = (this && this.__generator) || function (thisArg, body) { }; Object.defineProperty(exports, "__esModule", { value: true }); exports.event = void 0; -var socket_io_client_1 = require("socket.io-client"); var kafkajs_1 = require("kafkajs"); +var socket_io_client_1 = require("socket.io-client"); var socket = null; var kafka = null; +var kafkaAdmin = null; var kafkaGroupId = null; +var activeConsumers = new Map(); var callbacks = {}; var event = { init: function (options) { - switch (options.type) { - case "inMemory": - if (!options.host) { - throw new Error("host is required for inMemory initialization"); - } - if (!options.protocol) { - throw new Error("protocol is required for inMemory initialization"); - } - var host = options.host, protocol = options.protocol; - var socketPath = (options === null || options === void 0 ? void 0 : options.port) - ? 
"".concat(protocol, "://").concat(host, ":").concat(options.port) - : "".concat(protocol, "://").concat(host); - socket = (0, socket_io_client_1.io)(socketPath); - socket.on("event", function (_a) { - var type = _a.type, payload = _a.payload; - if (callbacks[type]) { - callbacks[type].forEach(function (cb) { return cb(payload); }); - } - }); - break; - case "kafka": - if (!options.clientId) { - throw new Error("clientId is required for Kafka initialization"); - } - if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { - throw new Error("brokers array is required for Kafka initialization"); - } - if (!options.groupId) { - throw new Error("groupId is required for Kafka initialization"); + return __awaiter(this, void 0, void 0, function () { + var _a, host, protocol, socketPath; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + _a = options.type; + switch (_a) { + case "inMemory": return [3 /*break*/, 1]; + case "kafka": return [3 /*break*/, 2]; + } + return [3 /*break*/, 4]; + case 1: + if (!options.host) { + throw new Error("host is required for inMemory initialization"); + } + if (!options.protocol) { + throw new Error("protocol is required for inMemory initialization"); + } + host = options.host, protocol = options.protocol; + socketPath = (options === null || options === void 0 ? void 0 : options.port) + ? "".concat(protocol, "://").concat(host, ":").concat(options.port) + : "".concat(protocol, "://").concat(host); + socket = (0, socket_io_client_1.io)(socketPath); + socket.on("event", function (_a) { + var type = _a.type, payload = _a.payload; + if (callbacks[type]) { + callbacks[type].forEach(function (cb) { return cb(payload); }); + } + }); + return [3 /*break*/, 4]; + case 2: + if (!options.clientId) { + throw new Error("clientId is required for Kafka initialization"); + } + if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { + throw new Error("brokers array is required for Kafka initialization"); + } + if (!options.groupId) { + throw new Error("groupId is required for Kafka initialization"); + } + kafka = new kafkajs_1.Kafka({ + clientId: options.clientId, + brokers: options.brokers, + }); + kafkaGroupId = options.groupId; + kafkaAdmin = kafka.admin(); + return [4 /*yield*/, kafkaAdmin.connect()]; + case 3: + _b.sent(); + return [3 /*break*/, 4]; + case 4: return [2 /*return*/]; } - kafka = new kafkajs_1.Kafka({ - clientId: options.clientId, - brokers: options.brokers, - }); - kafkaGroupId = options.groupId; - break; - } + }); + }); }, publish: function () { var args = []; @@ -89,9 +108,9 @@ var event = { args[_i] = arguments[_i]; } return __awaiter(this, void 0, void 0, function () { - var payload, types, producer_1; - return __generator(this, function (_a) { - switch (_a.label) { + var payload, types, producer, _a, types_1, type; + return __generator(this, function (_b) { + switch (_b.label) { case 0: if (args.length < 2) { throw new Error("publish requires at least one event type and a payload"); @@ -102,24 +121,33 @@ var event = { types.forEach(function (type) { socket.emit("publish", { type: type, payload: payload }); }); - return [3 /*break*/, 4]; + return [3 /*break*/, 8]; case 1: - if (!kafka) return [3 /*break*/, 4]; - producer_1 = kafka.producer(); - return [4 /*yield*/, producer_1.connect()]; + if (!kafka) return [3 /*break*/, 8]; + producer = kafka.producer(); + return [4 /*yield*/, producer.connect()]; case 2: - _a.sent(); - types.forEach(function (type) { - 
producer_1.send({ + _b.sent(); + _a = 0, types_1 = types; + _b.label = 3; + case 3: + if (!(_a < types_1.length)) return [3 /*break*/, 6]; + type = types_1[_a]; + return [4 /*yield*/, producer.send({ topic: type, messages: [{ value: JSON.stringify(payload) }], - }); - }); - return [4 /*yield*/, producer_1.disconnect()]; - case 3: - _a.sent(); - _a.label = 4; - case 4: return [2 /*return*/]; + })]; + case 4: + _b.sent(); + _b.label = 5; + case 5: + _a++; + return [3 /*break*/, 3]; + case 6: return [4 /*yield*/, producer.disconnect()]; + case 7: + _b.sent(); + _b.label = 8; + case 8: return [2 /*return*/]; } }); }); @@ -136,51 +164,113 @@ var event = { callbacks[type].add(callback); if (!socket) return [3 /*break*/, 1]; socket.emit("subscribe", type); - return [3 /*break*/, 4]; + return [3 /*break*/, 5]; case 1: - if (!kafka) return [3 /*break*/, 4]; - consumer = kafka.consumer({ groupId: kafkaGroupId }); + if (!kafka) return [3 /*break*/, 5]; + consumer = activeConsumers.get(type); + if (!!consumer) return [3 /*break*/, 5]; + consumer = kafka.consumer({ + groupId: "".concat(kafkaGroupId, "-").concat(type), + }); return [4 /*yield*/, consumer.connect()]; case 2: _a.sent(); return [4 /*yield*/, consumer.subscribe({ topic: type, fromBeginning: true })]; case 3: _a.sent(); - consumer.run({ - eachMessage: function (_a) { return __awaiter(_this, [_a], void 0, function (_b) { - var payload_1; - var _c; - var topic = _b.topic, partition = _b.partition, message = _b.message; - return __generator(this, function (_d) { - if (callbacks[topic]) { - try { - payload_1 = JSON.parse(((_c = message.value) === null || _c === void 0 ? void 0 : _c.toString()) || "{}"); - callbacks[topic].forEach(function (cb) { return cb(payload_1); }); + return [4 /*yield*/, consumer.run({ + eachMessage: function (_a) { return __awaiter(_this, [_a], void 0, function (_b) { + var payload_1; + var _c; + var topic = _b.topic, partition = _b.partition, message = _b.message; + return __generator(this, function (_d) { + if (callbacks[topic]) { + try { + payload_1 = JSON.parse(((_c = message.value) === null || _c === void 0 ? 
void 0 : _c.toString()) || "{}"); + callbacks[topic].forEach(function (cb) { return cb(payload_1); }); + } + catch (error) { + console.error("Failed to parse message from topic ".concat(topic, ":"), error); + } } - catch (error) { - console.error("Failed to parse message from topic ".concat(topic, ":"), error); - } - } - return [2 /*return*/]; - }); - }); }, - }); - _a.label = 4; - case 4: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { + return [2 /*return*/]; + }); + }); }, + })]; + case 4: + _a.sent(); + activeConsumers.set(type, consumer); + _a.label = 5; + case 5: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { + var consumer; return __generator(this, function (_a) { - callbacks[type].delete(callback); - if (callbacks[type].size === 0) { - delete callbacks[type]; - if (socket) { + switch (_a.label) { + case 0: + callbacks[type].delete(callback); + if (!(callbacks[type].size === 0)) return [3 /*break*/, 3]; + delete callbacks[type]; + if (!socket) return [3 /*break*/, 1]; socket.emit("unsubscribe", type); - } + return [3 /*break*/, 3]; + case 1: + if (!kafka) return [3 /*break*/, 3]; + consumer = activeConsumers.get(type); + if (!consumer) return [3 /*break*/, 3]; + return [4 /*yield*/, consumer.disconnect()]; + case 2: + _a.sent(); + activeConsumers.delete(type); + _a.label = 3; + case 3: return [2 /*return*/]; } - return [2 /*return*/]; }); }); }]; } }); }); }, + disconnect: function () { + return __awaiter(this, void 0, void 0, function () { + var consumers, _i, consumers_1, _a, topic, consumer; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + if (!socket) return [3 /*break*/, 1]; + socket.disconnect(); + socket = null; + return [3 /*break*/, 8]; + case 1: + if (!kafka) return [3 /*break*/, 8]; + consumers = Array.from(activeConsumers.entries()); + _i = 0, consumers_1 = consumers; + _b.label = 2; + case 2: + if (!(_i < consumers_1.length)) return [3 /*break*/, 5]; + _a = consumers_1[_i], topic = _a[0], consumer = _a[1]; + return [4 /*yield*/, consumer.disconnect()]; + case 3: + _b.sent(); + _b.label = 4; + case 4: + _i++; + return [3 /*break*/, 2]; + case 5: + activeConsumers.clear(); + if (!kafkaAdmin) return [3 /*break*/, 7]; + return [4 /*yield*/, kafkaAdmin.disconnect()]; + case 6: + _b.sent(); + kafkaAdmin = null; + _b.label = 7; + case 7: + kafka = null; + _b.label = 8; + case 8: + Object.keys(callbacks).forEach(function (key) { return delete callbacks[key]; }); + return [2 /*return*/]; + } + }); + }); + } }; -exports.event = event; \ No newline at end of file +exports.event = event; diff --git a/client/Event.ts b/client/Event.ts index d8dfd15..c498935 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -1,8 +1,38 @@ import * as client from "prom-client"; +import { Consumer, Kafka } from "kafkajs"; import { Socket, io } from "socket.io-client"; -import { Kafka } from "kafkajs"; +let socket: Socket | null = null; +let kafka: Kafka | null = null; +let kafkaGroupId: string | null = null; +let sharedConsumer: Consumer | null = null; +let subscribedTopics: Set = new Set(); + +const callbacks: Record> = {}; + + +interface BaseInitOptions { + type: "inMemory" | "socket" | "kafka"; +} + +interface InMemoryOptions extends BaseInitOptions { + type: "inMemory"; + host: string; + port?: number; + protocol: string; +} + +interface KafkaOptions extends BaseInitOptions { + type: "kafka"; + clientId: string; + brokers: string[]; + groupId: string; +} + +type InitOptions = 
InMemoryOptions | KafkaOptions; + +type Callback = (payload: T) => void; const eventPublishCounter = new client.Counter({ name: "events_published_total", @@ -60,41 +90,13 @@ const unsubscriptionRate = new client.Counter({ labelNames: ["event_type"], }); -// Track throughput (events processed per second) + // Track throughput (events processed per second) const eventThroughput = new client.Counter({ name: "event_callbacks_processed_total", help: "Total number of event callbacks processed successfully", labelNames: ["event_type"], }); -let socket: Socket | null = null; -let kafka: Kafka | null = null; -let kafkaGroupId: string | null = null; - -const callbacks: Record> = {}; - -interface BaseInitOptions { - type: "inMemory" | "socket" | "kafka"; -} - -interface InMemoryOptions extends BaseInitOptions { - type: "inMemory"; - host: string; - port?: number; - protocol: string; -} - -interface KafkaOptions extends BaseInitOptions { - type: "kafka"; - clientId: string; - brokers: string[]; - groupId: string; -} - -type InitOptions = InMemoryOptions | KafkaOptions; - -type Callback = (payload: T) => void; - const event = { init(options: InitOptions) { switch (options.type) { @@ -105,7 +107,7 @@ const event = { if (!options.protocol) { throw new Error("protocol is required for inMemory initialization"); } - + const { host, protocol } = options; const socketPath = options?.port @@ -126,17 +128,13 @@ const event = { if (!options.clientId) { throw new Error("clientId is required for Kafka initialization"); } - if ( - !options.brokers || - !Array.isArray(options.brokers) || - options.brokers.length === 0 - ) { + if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { throw new Error("brokers array is required for Kafka initialization"); } if (!options.groupId) { throw new Error("groupId is required for Kafka initialization"); } - + kafka = new Kafka({ clientId: options.clientId, brokers: options.brokers, @@ -154,70 +152,21 @@ const event = { const payload = args[args.length - 1]; const types = args.slice(0, -1) as string[]; - // Process each event type - for (const type of types) { - console.log("node-event", "publish", type, payload); - - // Validation similar to sync version - if ( - type === "__proto__" || - type === "constructor" || - type === "prototype" - ) { - throw new Error("Invalid publish type"); - } - - // Track metrics for event publishing - const endTimer = eventPublishDuration.labels(type).startTimer(); - eventPublishCounter.labels(type).inc(); - - // Track payload size - const payloadSize = JSON.stringify(payload).length; - eventPayloadSize.labels(type).observe(payloadSize); - - try { - if (socket) { - socket!.emit("publish", { type, payload }); - } else if (kafka) { - const producer = kafka!.producer(); - await producer.connect(); - - await producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - }); + if (socket) { + types.forEach((type) => { + socket!.emit("publish", { type, payload }); + }); + } else if (kafka) { + const producer = kafka!.producer(); + await producer.connect(); - await producer.disconnect(); - } + const messages = types.map(type => ({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + })); - // Process local callbacks if they exist - if (callbacks[type]) { - callbacks[type].forEach((callback) => { - setTimeout(() => { - const callbackTimer = callbackProcessingDuration - .labels(type) - .startTimer(); - try { - callback(payload); - eventThroughput.labels(type).inc(); - } catch (err) { - 
console.error("node-event", "error", type, err); - const errorName = - err instanceof Error ? err.name : "UnknownError"; - eventPublishErrors.labels(type, errorName).inc(); - } finally { - callbackTimer(); - } - }, 0); - }); - } - } catch (err) { - console.error("node-event", "error", type, err); - const errorName = err instanceof Error ? err.name : "UnknownError"; - eventPublishErrors.labels(type, errorName).inc(); - } finally { - endTimer(); - } + await Promise.all(messages.map(msg => producer.send(msg))); + await producer.disconnect(); } }, @@ -229,49 +178,62 @@ const event = { callbacks[type].add(callback as Callback); - subscriptionRate.labels(type).inc(); - eventSubscriptionGauge.labels(type).set(callbacks[type].size); - if (socket) { socket!.emit("subscribe", type); } else if (kafka) { - const consumer = kafka!.consumer({ groupId: kafkaGroupId! }); - await consumer.connect(); - await consumer.subscribe({ topic: type, fromBeginning: true }); - - consumer.run({ - eachMessage: async ({ topic, partition, message }) => { - if (callbacks[topic]) { - try { - const payload = JSON.parse(message.value?.toString() || "{}"); - callbacks[topic].forEach((cb) => cb(payload)); - } catch (error) { - console.error( - `Failed to parse message from topic ${topic}:`, - error - ); + if (!sharedConsumer) { + sharedConsumer = kafka!.consumer({ groupId: kafkaGroupId! }); + await sharedConsumer.connect(); + + await sharedConsumer.run({ + eachMessage: async ({ topic, partition, message }) => { + if (callbacks[topic]) { + try { + const payload = JSON.parse(message.value?.toString() || "{}"); + callbacks[topic].forEach((cb) => cb(payload)); + } catch (error) { + console.error(`Failed to parse message from topic ${topic}:`, error); + } } - } - }, - }); + }, + }); + } + + if (!subscribedTopics.has(type)) { + await sharedConsumer.subscribe({ topic: type, fromBeginning: false }); + subscribedTopics.add(type); + } } return async () => { callbacks[type].delete(callback as Callback); - - unsubscriptionRate.labels(type).inc(); - if (callbacks[type].size === 0) { delete callbacks[type]; - eventSubscriptionGauge.labels(type).set(0); if (socket) { socket.emit("unsubscribe", type); } - } else { - eventSubscriptionGauge.labels(type).set(callbacks[type].size); } }; }, + + + + async disconnect() { + if (socket) { + socket.disconnect(); + socket = null; + } else if (kafka) { + if (sharedConsumer) { + await sharedConsumer.disconnect(); + sharedConsumer = null; + } + + subscribedTopics.clear(); + kafka = null; + } + + Object.keys(callbacks).forEach(key => delete callbacks[key]); + } }; -export { event, client }; +export { event }; \ No newline at end of file diff --git a/package.json b/package.json index 18abb55..42d031d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.4", + "version": "1.1.8", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ From 8cff07ae5fc8bd259284a98a4d526f626a4052f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Mon, 15 Sep 2025 18:42:41 +0300 Subject: [PATCH 05/35] Refactor event publish and subscribe logic Improved event publishing by validating event types, tracking metrics, and refactoring Kafka and socket handling. Enhanced subscription management with better metric updates and streamlined callback execution. Cleaned up formatting and removed unnecessary code. 
--- client/Event.ts | 100 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 68 insertions(+), 32 deletions(-) diff --git a/client/Event.ts b/client/Event.ts index c498935..690f851 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -11,7 +11,6 @@ let subscribedTopics: Set = new Set(); const callbacks: Record> = {}; - interface BaseInitOptions { type: "inMemory" | "socket" | "kafka"; } @@ -90,7 +89,7 @@ const unsubscriptionRate = new client.Counter({ labelNames: ["event_type"], }); - // Track throughput (events processed per second) +// Track throughput (events processed per second) const eventThroughput = new client.Counter({ name: "event_callbacks_processed_total", help: "Total number of event callbacks processed successfully", @@ -107,7 +106,7 @@ const event = { if (!options.protocol) { throw new Error("protocol is required for inMemory initialization"); } - + const { host, protocol } = options; const socketPath = options?.port @@ -128,13 +127,17 @@ const event = { if (!options.clientId) { throw new Error("clientId is required for Kafka initialization"); } - if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { + if ( + !options.brokers || + !Array.isArray(options.brokers) || + options.brokers.length === 0 + ) { throw new Error("brokers array is required for Kafka initialization"); } if (!options.groupId) { throw new Error("groupId is required for Kafka initialization"); } - + kafka = new Kafka({ clientId: options.clientId, brokers: options.brokers, @@ -152,21 +155,52 @@ const event = { const payload = args[args.length - 1]; const types = args.slice(0, -1) as string[]; - if (socket) { - types.forEach((type) => { + for (const type of types) { + console.log("node-event", "publish", type, payload); + + if ( + type === "__proto__" || + type === "constructor" || + type === "prototype" + ) { + throw new Error("Invalid publish type"); + } + + const endTimer = eventPublishDuration.labels(type).startTimer(); + eventPublishCounter.labels(type).inc(); + + const payloadSize = JSON.stringify(payload).length; + eventPayloadSize.labels(type).observe(payloadSize); + + if (socket) { socket!.emit("publish", { type, payload }); - }); - } else if (kafka) { - const producer = kafka!.producer(); - await producer.connect(); + } else if (kafka) { + const producer = kafka!.producer(); + await producer.connect(); - const messages = types.map(type => ({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - })); + await producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); + + await producer.disconnect(); + } - await Promise.all(messages.map(msg => producer.send(msg))); - await producer.disconnect(); + if (callbacks[type]) { + callbacks[type].forEach((callback) => { + setTimeout(() => { + const callbackTimer = callbackProcessingDuration + .labels(type) + .startTimer(); + + callback(payload); + eventThroughput.labels(type).inc(); + callbackTimer(); + }, 0); + }); + } + + endTimer(); } }, @@ -178,27 +212,25 @@ const event = { callbacks[type].add(callback as Callback); + subscriptionRate.labels(type).inc(); + eventSubscriptionGauge.labels(type).set(callbacks[type].size); + if (socket) { socket!.emit("subscribe", type); } else if (kafka) { if (!sharedConsumer) { sharedConsumer = kafka!.consumer({ groupId: kafkaGroupId! 
}); await sharedConsumer.connect(); - await sharedConsumer.run({ eachMessage: async ({ topic, partition, message }) => { if (callbacks[topic]) { - try { - const payload = JSON.parse(message.value?.toString() || "{}"); - callbacks[topic].forEach((cb) => cb(payload)); - } catch (error) { - console.error(`Failed to parse message from topic ${topic}:`, error); - } + const payload = JSON.parse(message.value?.toString() || "{}"); + callbacks[topic].forEach((cb) => cb(payload)); } }, }); } - + if (!subscribedTopics.has(type)) { await sharedConsumer.subscribe({ topic: type, fromBeginning: false }); subscribedTopics.add(type); @@ -207,17 +239,21 @@ const event = { return async () => { callbacks[type].delete(callback as Callback); + + unsubscriptionRate.labels(type).inc(); + if (callbacks[type].size === 0) { delete callbacks[type]; + eventSubscriptionGauge.labels(type).set(0); if (socket) { socket.emit("unsubscribe", type); } + } else { + eventSubscriptionGauge.labels(type).set(callbacks[type].size); } }; }, - - async disconnect() { if (socket) { socket.disconnect(); @@ -227,13 +263,13 @@ const event = { await sharedConsumer.disconnect(); sharedConsumer = null; } - + subscribedTopics.clear(); kafka = null; } - - Object.keys(callbacks).forEach(key => delete callbacks[key]); - } + + Object.keys(callbacks).forEach((key) => delete callbacks[key]); + }, }; -export { event }; \ No newline at end of file +export { event }; From b0fb64862c9d9bb6e4c402012c8b0bbe7af4c0c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Tue, 16 Sep 2025 12:17:24 +0300 Subject: [PATCH 06/35] Add Kafka backlog monitoring and metrics Introduces periodic monitoring of Kafka topic backlogs, exposing a Prometheus gauge for the number of unprocessed events per topic. Adds methods to start, stop, and manually check backlog monitoring, and ensures metrics are updated on topic subscription and disconnect. Also refactors some Kafka producer/consumer usage for clarity. 
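The core of the new monitoring is a per-partition lag calculation:
lag = latest topic offset - committed group offset, clamped at zero
and summed across partitions. A condensed sketch using the same
kafkajs admin calls as the diff (error handling and admin-connection
reuse elided):

    import { Kafka } from "kafkajs";

    // Total messages a consumer group has yet to process for one topic.
    async function topicBacklog(kafka: Kafka, groupId: string, topic: string) {
      const admin = kafka.admin();
      await admin.connect();
      // Offsets the group has committed for this topic...
      const committed = (
        await admin.fetchOffsets({ groupId, topics: [topic] })
      ).find((g) => g.topic === topic);
      // ...and the current high-watermark for each partition.
      const latest = await admin.fetchTopicOffsets(topic);
      let totalLag = 0;
      for (const p of committed?.partitions ?? []) {
        const head = latest.find((l) => l.partition === p.partition);
        if (head) {
          totalLag += Math.max(0, parseInt(head.offset) - parseInt(p.offset));
        }
      }
      await admin.disconnect();
      return totalLag;
    }

The kafka_backlog_events_total gauge is then set to this sum for each
subscribed topic, every 30 s by default.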
--- client/Event.ts | 114 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 109 insertions(+), 5 deletions(-) diff --git a/client/Event.ts b/client/Event.ts index 690f851..1c1a180 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -8,6 +8,7 @@ let kafka: Kafka | null = null; let kafkaGroupId: string | null = null; let sharedConsumer: Consumer | null = null; let subscribedTopics: Set = new Set(); +let backlogMonitoringInterval: NodeJS.Timeout | null = null; const callbacks: Record> = {}; @@ -96,6 +97,59 @@ const eventThroughput = new client.Counter({ labelNames: ["event_type"], }); +const kafkaBacklogSize = new client.Gauge({ + name: "kafka_backlog_events_total", + help: "Total number of events waiting to be processed", + labelNames: ["topic"], +}); + +// Function to update Kafka backlog metrics +const updateKafkaBacklogMetrics = async () => { + if (!kafka || !kafkaGroupId || subscribedTopics.size === 0) return; + + const admin = kafka.admin(); + await admin.connect(); + + for (const topic of subscribedTopics) { + // Get consumer group offsets + const offsetsResponse = await admin.fetchOffsets({ + groupId: kafkaGroupId, + topics: [topic], + }); + + // Get latest offsets for the topic + const topicOffsets = await admin.fetchTopicOffsets(topic); + + let totalLag = 0; + + // The response structure is: [{ topic: string, partitions: FetchOffsetsPartition[] }] + const topicResponse = offsetsResponse.find( + (response) => response.topic === topic + ); + + if (topicResponse) { + // Calculate lag for each partition + topicResponse.partitions.forEach((partitionOffset) => { + const latestOffset = topicOffsets.find( + (to) => to.partition === partitionOffset.partition + ); + + if (latestOffset) { + const consumerOffset = parseInt(partitionOffset.offset); + const latestOffsetValue = parseInt(latestOffset.offset); + const lag = Math.max(0, latestOffsetValue - consumerOffset); + totalLag += lag; + } + }); + } + + kafkaBacklogSize.labels(topic).set(totalLag); + console.log(`Backlog for topic ${topic}: ${totalLag} messages`); + } + + await admin.disconnect(); +}; + const event = { init(options: InitOptions) { switch (options.type) { @@ -143,10 +197,44 @@ const event = { brokers: options.brokers, }); kafkaGroupId = options.groupId; + + // Start backlog monitoring after Kafka initialization + event.startBacklogMonitoring(); break; } }, + // Start backlog monitoring + startBacklogMonitoring(intervalMs: number = 30000) { + if (kafka && !backlogMonitoringInterval) { + console.log("Starting Kafka backlog monitoring..."); + + // Run once immediately + updateKafkaBacklogMetrics(); + + // Set up periodic monitoring + backlogMonitoringInterval = setInterval(() => { + updateKafkaBacklogMetrics(); + }, intervalMs); + } + }, + + // Stop backlog monitoring + stopBacklogMonitoring() { + if (backlogMonitoringInterval) { + clearInterval(backlogMonitoringInterval); + backlogMonitoringInterval = null; + console.log("Stopped Kafka backlog monitoring"); + } + }, + + // Manual backlog check + async checkBacklog(): Promise { + if (kafka) { + await updateKafkaBacklogMetrics(); + } + }, + async publish(...args: [...string[], T]): Promise { if (args.length < 2) { throw new Error("publish requires at least one event type and a payload"); @@ -173,9 +261,9 @@ const event = { eventPayloadSize.labels(type).observe(payloadSize); if (socket) { - socket!.emit("publish", { type, payload }); + socket.emit("publish", { type, payload }); } else if (kafka) { - const producer = kafka!.producer(); + const producer = 
kafka.producer(); await producer.connect(); await producer.send({ @@ -216,16 +304,24 @@ const event = { eventSubscriptionGauge.labels(type).set(callbacks[type].size); if (socket) { - socket!.emit("subscribe", type); + socket.emit("subscribe", type); } else if (kafka) { if (!sharedConsumer) { - sharedConsumer = kafka!.consumer({ groupId: kafkaGroupId! }); + sharedConsumer = kafka.consumer({ groupId: kafkaGroupId! }); await sharedConsumer.connect(); await sharedConsumer.run({ eachMessage: async ({ topic, partition, message }) => { if (callbacks[topic]) { const payload = JSON.parse(message.value?.toString() || "{}"); - callbacks[topic].forEach((cb) => cb(payload)); + callbacks[topic].forEach((cb) => { + const callbackTimer = callbackProcessingDuration + .labels(topic) + .startTimer(); + + cb(payload); + eventThroughput.labels(topic).inc(); + callbackTimer(); + }); } }, }); @@ -234,6 +330,11 @@ const event = { if (!subscribedTopics.has(type)) { await sharedConsumer.subscribe({ topic: type, fromBeginning: false }); subscribedTopics.add(type); + + // Update backlog metrics immediately after subscribing to a new topic + setTimeout(() => { + updateKafkaBacklogMetrics(); + }, 1000); } } @@ -255,6 +356,8 @@ const event = { }, async disconnect() { + event.stopBacklogMonitoring(); + if (socket) { socket.disconnect(); socket = null; @@ -266,6 +369,7 @@ const event = { subscribedTopics.clear(); kafka = null; + kafkaGroupId = null; } Object.keys(callbacks).forEach((key) => delete callbacks[key]); From e7a3d2b1133b179e77c44e169302d4a602d47f2a Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Tue, 16 Sep 2025 12:25:14 +0300 Subject: [PATCH 07/35] Refactor Kafka consumer management and add metrics Replaces per-topic Kafka consumers with a single shared consumer that dynamically subscribes to topics and restarts as needed. Adds Prometheus metrics for event publishing, subscriptions, callback processing, payload size, error rates, and throughput. Updates API to export Prometheus client and adds restartKafkaConsumer method. Bumps package version to 1.1.11. 
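Context for the restart approach: a kafkajs consumer cannot subscribe
to additional topics once consumer.run() has started, so picking up a
newly requested topic means stopping the running consumer and starting
a fresh one with the complete topic list. A condensed sketch of the
pattern (dispatch to the callbacks map is elided):

    import { Consumer, Kafka } from "kafkajs";

    async function restartConsumer(
      kafka: Kafka,
      groupId: string,
      topics: string[],
      current: Consumer | null
    ): Promise<Consumer> {
      if (current) {
        await current.stop();       // halt message processing
        await current.disconnect(); // leave the group cleanly
      }
      const consumer = kafka.consumer({ groupId });
      await consumer.connect();
      await consumer.subscribe({ topics, fromBeginning: false });
      await consumer.run({
        eachMessage: async ({ topic, message }) => {
          const payload = JSON.parse(message.value?.toString() ?? "{}");
          // ...invoke the callbacks registered for `topic` with payload...
        },
      });
      return consumer;
    }

The trade-off is that every first-time subscription to a topic costs a
consumer-group rebalance.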
--- client/Event.d.ts | 6 +- client/Event.js | 400 +++++++++++++++++++++++++++++----------------- client/Event.ts | 86 ++++++---- package.json | 2 +- 4 files changed, 311 insertions(+), 183 deletions(-) diff --git a/client/Event.d.ts b/client/Event.d.ts index 06ff9cb..f97b813 100644 --- a/client/Event.d.ts +++ b/client/Event.d.ts @@ -1,3 +1,4 @@ +import * as client from "prom-client"; interface BaseInitOptions { type: "inMemory" | "socket" | "kafka"; } @@ -16,9 +17,10 @@ interface KafkaOptions extends BaseInitOptions { type InitOptions = InMemoryOptions | KafkaOptions; type Callback = (payload: T) => void; declare const event: { - init(options: InitOptions): Promise; + init(options: InitOptions): void; publish(...args: [...string[], T]): Promise; subscribe(type: string, callback: Callback): Promise<() => void>; + restartKafkaConsumer(): Promise; disconnect(): Promise; }; -export { event }; +export { event, client }; diff --git a/client/Event.js b/client/Event.js index 79538c8..7caf580 100644 --- a/client/Event.js +++ b/client/Event.js @@ -36,71 +36,113 @@ var __generator = (this && this.__generator) || function (thisArg, body) { } }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.event = void 0; +exports.client = exports.event = void 0; +var client = require("prom-client"); +exports.client = client; var kafkajs_1 = require("kafkajs"); var socket_io_client_1 = require("socket.io-client"); var socket = null; var kafka = null; -var kafkaAdmin = null; var kafkaGroupId = null; -var activeConsumers = new Map(); +var sharedConsumer = null; +var subscribedTopics = new Set(); +var isConsumerRunning = false; var callbacks = {}; +var eventPublishCounter = new client.Counter({ + name: "events_published_total", + help: "Total number of events published", + labelNames: ["event_type"], +}); +var eventSubscriptionGauge = new client.Gauge({ + name: "active_event_subscriptions", + help: "Number of active event subscriptions", + labelNames: ["event_type"], +}); +var eventPublishDuration = new client.Histogram({ + name: "event_publish_duration_seconds", + help: "Time taken to publish events", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], +}); +// Track payload size for analysis +var eventPayloadSize = new client.Histogram({ + name: "event_payload_size_bytes", + help: "Size of event payloads in bytes", + labelNames: ["event_type"], + buckets: [10, 100, 1000, 10000, 100000, 1000000], +}); +// Track error rates +var eventPublishErrors = new client.Counter({ + name: "event_publish_errors_total", + help: "Total number of event publish errors", + labelNames: ["event_type", "error_type"], +}); +// Track callback processing duration +var callbackProcessingDuration = new client.Histogram({ + name: "event_callback_duration_seconds", + help: "Time taken to process event callbacks", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], +}); +// Track subscription rates +var subscriptionRate = new client.Counter({ + name: "event_subscriptions_total", + help: "Total number of event subscriptions created", + labelNames: ["event_type"], +}); +// Track unsubscription rates +var unsubscriptionRate = new client.Counter({ + name: "event_unsubscriptions_total", + help: "Total number of event unsubscriptions", + labelNames: ["event_type"], +}); +// Track throughput (events processed per second) +var eventThroughput = new client.Counter({ + name: "event_callbacks_processed_total", + help: "Total number of event callbacks processed 
successfully", + labelNames: ["event_type"], +}); var event = { init: function (options) { - return __awaiter(this, void 0, void 0, function () { - var _a, host, protocol, socketPath; - return __generator(this, function (_b) { - switch (_b.label) { - case 0: - _a = options.type; - switch (_a) { - case "inMemory": return [3 /*break*/, 1]; - case "kafka": return [3 /*break*/, 2]; - } - return [3 /*break*/, 4]; - case 1: - if (!options.host) { - throw new Error("host is required for inMemory initialization"); - } - if (!options.protocol) { - throw new Error("protocol is required for inMemory initialization"); - } - host = options.host, protocol = options.protocol; - socketPath = (options === null || options === void 0 ? void 0 : options.port) - ? "".concat(protocol, "://").concat(host, ":").concat(options.port) - : "".concat(protocol, "://").concat(host); - socket = (0, socket_io_client_1.io)(socketPath); - socket.on("event", function (_a) { - var type = _a.type, payload = _a.payload; - if (callbacks[type]) { - callbacks[type].forEach(function (cb) { return cb(payload); }); - } - }); - return [3 /*break*/, 4]; - case 2: - if (!options.clientId) { - throw new Error("clientId is required for Kafka initialization"); - } - if (!options.brokers || !Array.isArray(options.brokers) || options.brokers.length === 0) { - throw new Error("brokers array is required for Kafka initialization"); - } - if (!options.groupId) { - throw new Error("groupId is required for Kafka initialization"); - } - kafka = new kafkajs_1.Kafka({ - clientId: options.clientId, - brokers: options.brokers, - }); - kafkaGroupId = options.groupId; - kafkaAdmin = kafka.admin(); - return [4 /*yield*/, kafkaAdmin.connect()]; - case 3: - _b.sent(); - return [3 /*break*/, 4]; - case 4: return [2 /*return*/]; + switch (options.type) { + case "inMemory": + if (!options.host) { + throw new Error("host is required for inMemory initialization"); } - }); - }); + if (!options.protocol) { + throw new Error("protocol is required for inMemory initialization"); + } + var host = options.host, protocol = options.protocol; + var socketPath = (options === null || options === void 0 ? void 0 : options.port) + ? 
"".concat(protocol, "://").concat(host, ":").concat(options.port) + : "".concat(protocol, "://").concat(host); + socket = (0, socket_io_client_1.io)(socketPath); + socket.on("event", function (_a) { + var type = _a.type, payload = _a.payload; + if (callbacks[type]) { + callbacks[type].forEach(function (cb) { return cb(payload); }); + } + }); + break; + case "kafka": + if (!options.clientId) { + throw new Error("clientId is required for Kafka initialization"); + } + if (!options.brokers || + !Array.isArray(options.brokers) || + options.brokers.length === 0) { + throw new Error("brokers array is required for Kafka initialization"); + } + if (!options.groupId) { + throw new Error("groupId is required for Kafka initialization"); + } + kafka = new kafkajs_1.Kafka({ + clientId: options.clientId, + brokers: options.brokers, + }); + kafkaGroupId = options.groupId; + break; + } }, publish: function () { var args = []; @@ -108,7 +150,7 @@ var event = { args[_i] = arguments[_i]; } return __awaiter(this, void 0, void 0, function () { - var payload, types, producer, _a, types_1, type; + var payload, types, _loop_1, _a, types_1, type; return __generator(this, function (_b) { switch (_b.label) { case 0: @@ -117,44 +159,78 @@ var event = { } payload = args[args.length - 1]; types = args.slice(0, -1); - if (!socket) return [3 /*break*/, 1]; - types.forEach(function (type) { - socket.emit("publish", { type: type, payload: payload }); - }); - return [3 /*break*/, 8]; + _loop_1 = function (type) { + var endTimer, payloadSize, producer; + return __generator(this, function (_c) { + switch (_c.label) { + case 0: + console.log("node-event", "publish", type, payload); + if (type === "__proto__" || + type === "constructor" || + type === "prototype") { + throw new Error("Invalid publish type"); + } + endTimer = eventPublishDuration.labels(type).startTimer(); + eventPublishCounter.labels(type).inc(); + payloadSize = JSON.stringify(payload).length; + eventPayloadSize.labels(type).observe(payloadSize); + if (!socket) return [3 /*break*/, 1]; + socket.emit("publish", { type: type, payload: payload }); + return [3 /*break*/, 5]; + case 1: + if (!kafka) return [3 /*break*/, 5]; + producer = kafka.producer(); + return [4 /*yield*/, producer.connect()]; + case 2: + _c.sent(); + return [4 /*yield*/, producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + })]; + case 3: + _c.sent(); + return [4 /*yield*/, producer.disconnect()]; + case 4: + _c.sent(); + _c.label = 5; + case 5: + if (callbacks[type]) { + callbacks[type].forEach(function (callback) { + setTimeout(function () { + var callbackTimer = callbackProcessingDuration + .labels(type) + .startTimer(); + callback(payload); + eventThroughput.labels(type).inc(); + callbackTimer(); + }, 0); + }); + } + endTimer(); + return [2 /*return*/]; + } + }); + }; + _a = 0, types_1 = types; + _b.label = 1; case 1: - if (!kafka) return [3 /*break*/, 8]; - producer = kafka.producer(); - return [4 /*yield*/, producer.connect()]; + if (!(_a < types_1.length)) return [3 /*break*/, 4]; + type = types_1[_a]; + return [5 /*yield**/, _loop_1(type)]; case 2: _b.sent(); - _a = 0, types_1 = types; _b.label = 3; case 3: - if (!(_a < types_1.length)) return [3 /*break*/, 6]; - type = types_1[_a]; - return [4 /*yield*/, producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - })]; - case 4: - _b.sent(); - _b.label = 5; - case 5: _a++; - return [3 /*break*/, 3]; - case 6: return [4 /*yield*/, producer.disconnect()]; - case 7: - _b.sent(); - _b.label 
= 8; - case 8: return [2 /*return*/]; + return [3 /*break*/, 1]; + case 4: return [2 /*return*/]; } }); }); }, subscribe: function (type, callback) { return __awaiter(this, void 0, void 0, function () { - var consumer; + var wasNewTopic; var _this = this; return __generator(this, function (_a) { switch (_a.label) { @@ -162,115 +238,139 @@ var event = { if (!callbacks[type]) callbacks[type] = new Set(); callbacks[type].add(callback); + subscriptionRate.labels(type).inc(); + eventSubscriptionGauge.labels(type).set(callbacks[type].size); if (!socket) return [3 /*break*/, 1]; socket.emit("subscribe", type); - return [3 /*break*/, 5]; + return [3 /*break*/, 3]; case 1: - if (!kafka) return [3 /*break*/, 5]; - consumer = activeConsumers.get(type); - if (!!consumer) return [3 /*break*/, 5]; - consumer = kafka.consumer({ - groupId: "".concat(kafkaGroupId, "-").concat(type), - }); - return [4 /*yield*/, consumer.connect()]; + if (!kafka) return [3 /*break*/, 3]; + wasNewTopic = !subscribedTopics.has(type); + if (!wasNewTopic) return [3 /*break*/, 3]; + subscribedTopics.add(type); + return [4 /*yield*/, this.restartKafkaConsumer()]; + case 2: + _a.sent(); + _a.label = 3; + case 3: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { + return __generator(this, function (_a) { + callbacks[type].delete(callback); + unsubscriptionRate.labels(type).inc(); + if (callbacks[type].size === 0) { + delete callbacks[type]; + eventSubscriptionGauge.labels(type).set(0); + if (socket) { + socket.emit("unsubscribe", type); + } + } + else { + eventSubscriptionGauge.labels(type).set(callbacks[type].size); + } + return [2 /*return*/]; + }); + }); }]; + } + }); + }); + }, + restartKafkaConsumer: function () { + return __awaiter(this, void 0, void 0, function () { + var _this = this; + return __generator(this, function (_a) { + switch (_a.label) { + case 0: + if (!kafka || subscribedTopics.size === 0) + return [2 /*return*/]; + if (!(sharedConsumer && isConsumerRunning)) return [3 /*break*/, 3]; + console.log("Stopping existing Kafka consumer..."); + return [4 /*yield*/, sharedConsumer.stop()]; + case 1: + _a.sent(); + return [4 /*yield*/, sharedConsumer.disconnect()]; case 2: _a.sent(); - return [4 /*yield*/, consumer.subscribe({ topic: type, fromBeginning: true })]; + sharedConsumer = null; + isConsumerRunning = false; + _a.label = 3; case 3: + console.log("Starting Kafka consumer with topics: ".concat(Array.from(subscribedTopics).join(", "))); + sharedConsumer = kafka.consumer({ groupId: kafkaGroupId }); + return [4 /*yield*/, sharedConsumer.connect()]; + case 4: + _a.sent(); + return [4 /*yield*/, sharedConsumer.subscribe({ + topics: Array.from(subscribedTopics), + fromBeginning: false + })]; + case 5: _a.sent(); - return [4 /*yield*/, consumer.run({ + return [4 /*yield*/, sharedConsumer.run({ + partitionsConsumedConcurrently: 10, eachMessage: function (_a) { return __awaiter(_this, [_a], void 0, function (_b) { - var payload_1; + var payload_1, callbackTimer; var _c; var topic = _b.topic, partition = _b.partition, message = _b.message; return __generator(this, function (_d) { if (callbacks[topic]) { try { payload_1 = JSON.parse(((_c = message.value) === null || _c === void 0 ? 
void 0 : _c.toString()) || "{}"); - callbacks[topic].forEach(function (cb) { return cb(payload_1); }); + callbackTimer = callbackProcessingDuration + .labels(topic) + .startTimer(); + callbacks[topic].forEach(function (cb) { + cb(payload_1); + eventThroughput.labels(topic).inc(); + }); + callbackTimer(); } catch (error) { - console.error("Failed to parse message from topic ".concat(topic, ":"), error); + console.error("Error processing message for topic ".concat(topic, ":"), error); + eventPublishErrors.labels(topic, "processing_error").inc(); } } return [2 /*return*/]; }); }); }, })]; - case 4: + case 6: _a.sent(); - activeConsumers.set(type, consumer); - _a.label = 5; - case 5: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { - var consumer; - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - callbacks[type].delete(callback); - if (!(callbacks[type].size === 0)) return [3 /*break*/, 3]; - delete callbacks[type]; - if (!socket) return [3 /*break*/, 1]; - socket.emit("unsubscribe", type); - return [3 /*break*/, 3]; - case 1: - if (!kafka) return [3 /*break*/, 3]; - consumer = activeConsumers.get(type); - if (!consumer) return [3 /*break*/, 3]; - return [4 /*yield*/, consumer.disconnect()]; - case 2: - _a.sent(); - activeConsumers.delete(type); - _a.label = 3; - case 3: return [2 /*return*/]; - } - }); - }); }]; + isConsumerRunning = true; + return [2 /*return*/]; } }); }); }, disconnect: function () { return __awaiter(this, void 0, void 0, function () { - var consumers, _i, consumers_1, _a, topic, consumer; - return __generator(this, function (_b) { - switch (_b.label) { + return __generator(this, function (_a) { + switch (_a.label) { case 0: if (!socket) return [3 /*break*/, 1]; socket.disconnect(); socket = null; - return [3 /*break*/, 8]; + return [3 /*break*/, 5]; case 1: - if (!kafka) return [3 /*break*/, 8]; - consumers = Array.from(activeConsumers.entries()); - _i = 0, consumers_1 = consumers; - _b.label = 2; + if (!kafka) return [3 /*break*/, 5]; + if (!(sharedConsumer && isConsumerRunning)) return [3 /*break*/, 4]; + return [4 /*yield*/, sharedConsumer.stop()]; case 2: - if (!(_i < consumers_1.length)) return [3 /*break*/, 5]; - _a = consumers_1[_i], topic = _a[0], consumer = _a[1]; - return [4 /*yield*/, consumer.disconnect()]; + _a.sent(); + return [4 /*yield*/, sharedConsumer.disconnect()]; case 3: - _b.sent(); - _b.label = 4; + _a.sent(); + sharedConsumer = null; + isConsumerRunning = false; + _a.label = 4; case 4: - _i++; - return [3 /*break*/, 2]; - case 5: - activeConsumers.clear(); - if (!kafkaAdmin) return [3 /*break*/, 7]; - return [4 /*yield*/, kafkaAdmin.disconnect()]; - case 6: - _b.sent(); - kafkaAdmin = null; - _b.label = 7; - case 7: + subscribedTopics.clear(); kafka = null; - _b.label = 8; - case 8: + _a.label = 5; + case 5: Object.keys(callbacks).forEach(function (key) { return delete callbacks[key]; }); return [2 /*return*/]; } }); }); - } + }, }; exports.event = event; diff --git a/client/Event.ts b/client/Event.ts index 1c1a180..db60c16 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -9,6 +9,7 @@ let kafkaGroupId: string | null = null; let sharedConsumer: Consumer | null = null; let subscribedTopics: Set = new Set(); let backlogMonitoringInterval: NodeJS.Timeout | null = null; +let isConsumerRunning = false; const callbacks: Record> = {}; @@ -306,35 +307,11 @@ const event = { if (socket) { socket.emit("subscribe", type); } else if (kafka) { - if (!sharedConsumer) { - sharedConsumer = 
kafka.consumer({ groupId: kafkaGroupId! }); - await sharedConsumer.connect(); - await sharedConsumer.run({ - eachMessage: async ({ topic, partition, message }) => { - if (callbacks[topic]) { - const payload = JSON.parse(message.value?.toString() || "{}"); - callbacks[topic].forEach((cb) => { - const callbackTimer = callbackProcessingDuration - .labels(topic) - .startTimer(); - - cb(payload); - eventThroughput.labels(topic).inc(); - callbackTimer(); - }); - } - }, - }); - } - - if (!subscribedTopics.has(type)) { - await sharedConsumer.subscribe({ topic: type, fromBeginning: false }); + const wasNewTopic = !subscribedTopics.has(type); + if (wasNewTopic) { subscribedTopics.add(type); - - // Update backlog metrics immediately after subscribing to a new topic - setTimeout(() => { - updateKafkaBacklogMetrics(); - }, 1000); + + await this.restartKafkaConsumer(); } } @@ -355,6 +332,53 @@ const event = { }; }, + async restartKafkaConsumer() { + if (!kafka || subscribedTopics.size === 0) return; + + if (sharedConsumer && isConsumerRunning) { + console.log("Stopping existing Kafka consumer..."); + await sharedConsumer.stop(); + await sharedConsumer.disconnect(); + sharedConsumer = null; + isConsumerRunning = false; + } + + console.log(`Starting Kafka consumer with topics: ${Array.from(subscribedTopics).join(", ")}`); + sharedConsumer = kafka.consumer({ groupId: kafkaGroupId! }); + await sharedConsumer.connect(); + + await sharedConsumer.subscribe({ + topics: Array.from(subscribedTopics), + fromBeginning: false + }); + + await sharedConsumer.run({ + partitionsConsumedConcurrently: 10, + eachMessage: async ({ topic, partition, message }) => { + if (callbacks[topic]) { + try { + const payload = JSON.parse(message.value?.toString() || "{}"); + const callbackTimer = callbackProcessingDuration + .labels(topic) + .startTimer(); + + callbacks[topic].forEach((cb) => { + cb(payload); + eventThroughput.labels(topic).inc(); + }); + + callbackTimer(); + } catch (error) { + console.error(`Error processing message for topic ${topic}:`, error); + eventPublishErrors.labels(topic, "processing_error").inc(); + } + } + }, + }); + + isConsumerRunning = true; + }, + async disconnect() { event.stopBacklogMonitoring(); @@ -362,9 +386,11 @@ const event = { socket.disconnect(); socket = null; } else if (kafka) { - if (sharedConsumer) { + if (sharedConsumer && isConsumerRunning) { + await sharedConsumer.stop(); await sharedConsumer.disconnect(); sharedConsumer = null; + isConsumerRunning = false; } subscribedTopics.clear(); @@ -376,4 +402,4 @@ const event = { }, }; -export { event }; +export { event, client }; diff --git a/package.json b/package.json index 42d031d..a7b73aa 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.8", + "version": "1.1.11", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ From ae4924dad001aec733e9b98d55c06f9bd4bbbb24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Tue, 16 Sep 2025 12:36:57 +0300 Subject: [PATCH 08/35] Update Event.ts --- client/Event.ts | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/client/Event.ts b/client/Event.ts index db60c16..605a083 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -273,6 +273,8 @@ const event = { }); await producer.disconnect(); + + setTimeout(() => updateKafkaBacklogMetrics(), 500); } if (callbacks[type]) { @@ -310,8 +312,11 @@ const event = { const wasNewTopic = 
!subscribedTopics.has(type); if (wasNewTopic) { subscribedTopics.add(type); - await this.restartKafkaConsumer(); + + setTimeout(() => { + updateKafkaBacklogMetrics(); + }, 1000); } } @@ -343,13 +348,17 @@ const event = { isConsumerRunning = false; } - console.log(`Starting Kafka consumer with topics: ${Array.from(subscribedTopics).join(", ")}`); + console.log( + `Starting Kafka consumer with topics: ${Array.from(subscribedTopics).join( + ", " + )}` + ); sharedConsumer = kafka.consumer({ groupId: kafkaGroupId! }); await sharedConsumer.connect(); - - await sharedConsumer.subscribe({ - topics: Array.from(subscribedTopics), - fromBeginning: false + + await sharedConsumer.subscribe({ + topics: Array.from(subscribedTopics), + fromBeginning: false, }); await sharedConsumer.run({ @@ -361,21 +370,24 @@ const event = { const callbackTimer = callbackProcessingDuration .labels(topic) .startTimer(); - + callbacks[topic].forEach((cb) => { cb(payload); eventThroughput.labels(topic).inc(); }); - + callbackTimer(); } catch (error) { - console.error(`Error processing message for topic ${topic}:`, error); + console.error( + `Error processing message for topic ${topic}:`, + error + ); eventPublishErrors.labels(topic, "processing_error").inc(); } } }, }); - + isConsumerRunning = true; }, From ec0032082e8aafcb619d8c553b8a94ae5bab47a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Tue, 16 Sep 2025 12:41:52 +0300 Subject: [PATCH 09/35] Fix iteration over subscribedTopics in Kafka metrics Wraps subscribedTopics in Array.from() to ensure proper iteration, likely addressing cases where subscribedTopics is a Set or non-array iterable. --- client/Event.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/Event.ts b/client/Event.ts index 605a083..0174a39 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -111,7 +111,7 @@ const updateKafkaBacklogMetrics = async () => { const admin = kafka.admin(); await admin.connect(); - for (const topic of subscribedTopics) { + for (const topic of Array.from(subscribedTopics)) { // Get consumer group offsets const offsetsResponse = await admin.fetchOffsets({ groupId: kafkaGroupId, From 2fa2cc000f62a10126e3744a3fe4b30275eb992b Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Thu, 25 Sep 2025 10:27:50 +0300 Subject: [PATCH 10/35] Add Kafka backlog monitoring to event client Introduces functions to monitor Kafka backlog size, including periodic and manual checks, and exposes metrics for events waiting to be processed. Also adjusts consumer concurrency and updates package version. 
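Expected usage of the monitoring API, per the updated Event.d.ts below
(broker address, client/group ids, topic name, and the 10 s interval
are placeholder values):

    import { event, client } from "node-event-test-package";

    async function main() {
      // init() is synchronous and starts backlog monitoring on its own
      // (default interval: 30000 ms).
      event.init({
        type: "kafka",
        clientId: "example-client",
        brokers: ["localhost:9092"],
        groupId: "example-group",
      });

      // Backlog is only tracked for subscribed topics.
      await event.subscribe("example-topic", (payload) => {
        console.log("received", payload);
      });

      // To change the polling interval, stop the default loop first.
      event.stopBacklogMonitoring();
      event.startBacklogMonitoring(10000);

      // One-off refresh, e.g. right before a metrics scrape.
      await event.checkBacklog();

      // kafka_backlog_events_total{topic="..."} is readable through the
      // re-exported prom-client registry.
      console.log(await client.register.metrics());
    }

    main().catch(console.error);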
--- client/Event.d.ts | 3 ++ client/Event.js | 125 ++++++++++++++++++++++++++++++++++++++++++++-- client/Event.ts | 2 +- package.json | 2 +- 4 files changed, 125 insertions(+), 7 deletions(-) diff --git a/client/Event.d.ts b/client/Event.d.ts index f97b813..e9808c5 100644 --- a/client/Event.d.ts +++ b/client/Event.d.ts @@ -18,6 +18,9 @@ type InitOptions = InMemoryOptions | KafkaOptions; type Callback = (payload: T) => void; declare const event: { init(options: InitOptions): void; + startBacklogMonitoring(intervalMs?: number): void; + stopBacklogMonitoring(): void; + checkBacklog(): Promise; publish(...args: [...string[], T]): Promise; subscribe(type: string, callback: Callback): Promise<() => void>; restartKafkaConsumer(): Promise; diff --git a/client/Event.js b/client/Event.js index 7caf580..4777a6d 100644 --- a/client/Event.js +++ b/client/Event.js @@ -46,6 +46,7 @@ var kafka = null; var kafkaGroupId = null; var sharedConsumer = null; var subscribedTopics = new Set(); +var backlogMonitoringInterval = null; var isConsumerRunning = false; var callbacks = {}; var eventPublishCounter = new client.Counter({ @@ -102,6 +103,75 @@ var eventThroughput = new client.Counter({ help: "Total number of event callbacks processed successfully", labelNames: ["event_type"], }); +var kafkaBacklogSize = new client.Gauge({ + name: "kafka_backlog_events_total", + help: "Total number of events waiting to be processed", + labelNames: ["topic"], +}); +// Function to update Kafka backlog metrics +var updateKafkaBacklogMetrics = function () { return __awaiter(void 0, void 0, void 0, function () { + var admin, _loop_1, _i, _a, topic; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + if (!kafka || !kafkaGroupId || subscribedTopics.size === 0) + return [2 /*return*/]; + admin = kafka.admin(); + return [4 /*yield*/, admin.connect()]; + case 1: + _b.sent(); + _loop_1 = function (topic) { + var offsetsResponse, topicOffsets, totalLag, topicResponse; + return __generator(this, function (_c) { + switch (_c.label) { + case 0: return [4 /*yield*/, admin.fetchOffsets({ + groupId: kafkaGroupId, + topics: [topic], + })]; + case 1: + offsetsResponse = _c.sent(); + return [4 /*yield*/, admin.fetchTopicOffsets(topic)]; + case 2: + topicOffsets = _c.sent(); + totalLag = 0; + topicResponse = offsetsResponse.find(function (response) { return response.topic === topic; }); + if (topicResponse) { + // Calculate lag for each partition + topicResponse.partitions.forEach(function (partitionOffset) { + var latestOffset = topicOffsets.find(function (to) { return to.partition === partitionOffset.partition; }); + if (latestOffset) { + var consumerOffset = parseInt(partitionOffset.offset); + var latestOffsetValue = parseInt(latestOffset.offset); + var lag = Math.max(0, latestOffsetValue - consumerOffset); + totalLag += lag; + } + }); + } + kafkaBacklogSize.labels(topic).set(totalLag); + console.log("Backlog for topic ".concat(topic, ": ").concat(totalLag, " messages")); + return [2 /*return*/]; + } + }); + }; + _i = 0, _a = Array.from(subscribedTopics); + _b.label = 2; + case 2: + if (!(_i < _a.length)) return [3 /*break*/, 5]; + topic = _a[_i]; + return [5 /*yield**/, _loop_1(topic)]; + case 3: + _b.sent(); + _b.label = 4; + case 4: + _i++; + return [3 /*break*/, 2]; + case 5: return [4 /*yield*/, admin.disconnect()]; + case 6: + _b.sent(); + return [2 /*return*/]; + } + }); +}); }; var event = { init: function (options) { switch (options.type) { @@ -141,16 +211,55 @@ var event = { brokers: options.brokers, }); 
kafkaGroupId = options.groupId; + // Start backlog monitoring after Kafka initialization + event.startBacklogMonitoring(); break; } }, + // Start backlog monitoring + startBacklogMonitoring: function (intervalMs) { + if (intervalMs === void 0) { intervalMs = 30000; } + if (kafka && !backlogMonitoringInterval) { + console.log("Starting Kafka backlog monitoring..."); + // Run once immediately + updateKafkaBacklogMetrics(); + // Set up periodic monitoring + backlogMonitoringInterval = setInterval(function () { + updateKafkaBacklogMetrics(); + }, intervalMs); + } + }, + // Stop backlog monitoring + stopBacklogMonitoring: function () { + if (backlogMonitoringInterval) { + clearInterval(backlogMonitoringInterval); + backlogMonitoringInterval = null; + console.log("Stopped Kafka backlog monitoring"); + } + }, + // Manual backlog check + checkBacklog: function () { + return __awaiter(this, void 0, void 0, function () { + return __generator(this, function (_a) { + switch (_a.label) { + case 0: + if (!kafka) return [3 /*break*/, 2]; + return [4 /*yield*/, updateKafkaBacklogMetrics()]; + case 1: + _a.sent(); + _a.label = 2; + case 2: return [2 /*return*/]; + } + }); + }); + }, publish: function () { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } return __awaiter(this, void 0, void 0, function () { - var payload, types, _loop_1, _a, types_1, type; + var payload, types, _loop_2, _a, types_1, type; return __generator(this, function (_b) { switch (_b.label) { case 0: @@ -159,7 +268,7 @@ var event = { } payload = args[args.length - 1]; types = args.slice(0, -1); - _loop_1 = function (type) { + _loop_2 = function (type) { var endTimer, payloadSize, producer; return __generator(this, function (_c) { switch (_c.label) { @@ -192,6 +301,7 @@ var event = { return [4 /*yield*/, producer.disconnect()]; case 4: _c.sent(); + setTimeout(function () { return updateKafkaBacklogMetrics(); }, 500); _c.label = 5; case 5: if (callbacks[type]) { @@ -216,7 +326,7 @@ var event = { case 1: if (!(_a < types_1.length)) return [3 /*break*/, 4]; type = types_1[_a]; - return [5 /*yield**/, _loop_1(type)]; + return [5 /*yield**/, _loop_2(type)]; case 2: _b.sent(); _b.label = 3; @@ -251,6 +361,9 @@ var event = { return [4 /*yield*/, this.restartKafkaConsumer()]; case 2: _a.sent(); + setTimeout(function () { + updateKafkaBacklogMetrics(); + }, 1000); _a.label = 3; case 3: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { return __generator(this, function (_a) { @@ -300,12 +413,12 @@ var event = { _a.sent(); return [4 /*yield*/, sharedConsumer.subscribe({ topics: Array.from(subscribedTopics), - fromBeginning: false + fromBeginning: false, })]; case 5: _a.sent(); return [4 /*yield*/, sharedConsumer.run({ - partitionsConsumedConcurrently: 10, + partitionsConsumedConcurrently: 1, eachMessage: function (_a) { return __awaiter(_this, [_a], void 0, function (_b) { var payload_1, callbackTimer; var _c; @@ -345,6 +458,7 @@ var event = { return __generator(this, function (_a) { switch (_a.label) { case 0: + event.stopBacklogMonitoring(); if (!socket) return [3 /*break*/, 1]; socket.disconnect(); socket = null; @@ -364,6 +478,7 @@ var event = { case 4: subscribedTopics.clear(); kafka = null; + kafkaGroupId = null; _a.label = 5; case 5: Object.keys(callbacks).forEach(function (key) { return delete callbacks[key]; }); diff --git a/client/Event.ts b/client/Event.ts index 0174a39..b434234 100644 --- a/client/Event.ts +++ b/client/Event.ts @@ -362,7 +362,7 @@ const 
event = { }); await sharedConsumer.run({ - partitionsConsumedConcurrently: 10, + partitionsConsumedConcurrently: 1, eachMessage: async ({ topic, partition, message }) => { if (callbacks[topic]) { try { diff --git a/package.json b/package.json index a7b73aa..ed7f9e0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.11", + "version": "1.1.15", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ From 9d70807b49e0d5d1fde9b587f7f316546776449a Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Fri, 26 Sep 2025 10:25:05 +0300 Subject: [PATCH 11/35] Refactor event client to modular adapter architecture Replaces monolithic Event implementation with modular EventManager, KafkaAdapter, and SocketAdapter. Event logic and metrics are separated into dedicated files, improving maintainability and extensibility. Updates package.json exports to use new entry points. Removes legacy Event files. --- client/Event.d.ts | 29 -- client/Event.js | 491 ------------------------------- client/Event.ts | 417 -------------------------- client/adapters/KafkaAdapter.ts | 163 ++++++++++ client/adapters/SocketAdapter.ts | 59 ++++ client/eventManager.ts | 179 +++++++++++ client/index.ts | 31 ++ client/metrics.ts | 77 +++++ client/types/types.ts | 30 ++ package.json | 6 +- 10 files changed, 542 insertions(+), 940 deletions(-) delete mode 100644 client/Event.d.ts delete mode 100644 client/Event.js delete mode 100644 client/Event.ts create mode 100644 client/adapters/KafkaAdapter.ts create mode 100644 client/adapters/SocketAdapter.ts create mode 100644 client/eventManager.ts create mode 100644 client/index.ts create mode 100644 client/metrics.ts create mode 100644 client/types/types.ts diff --git a/client/Event.d.ts b/client/Event.d.ts deleted file mode 100644 index e9808c5..0000000 --- a/client/Event.d.ts +++ /dev/null @@ -1,29 +0,0 @@ -import * as client from "prom-client"; -interface BaseInitOptions { - type: "inMemory" | "socket" | "kafka"; -} -interface InMemoryOptions extends BaseInitOptions { - type: "inMemory"; - host: string; - port?: number; - protocol: string; -} -interface KafkaOptions extends BaseInitOptions { - type: "kafka"; - clientId: string; - brokers: string[]; - groupId: string; -} -type InitOptions = InMemoryOptions | KafkaOptions; -type Callback = (payload: T) => void; -declare const event: { - init(options: InitOptions): void; - startBacklogMonitoring(intervalMs?: number): void; - stopBacklogMonitoring(): void; - checkBacklog(): Promise; - publish(...args: [...string[], T]): Promise; - subscribe(type: string, callback: Callback): Promise<() => void>; - restartKafkaConsumer(): Promise; - disconnect(): Promise; -}; -export { event, client }; diff --git a/client/Event.js b/client/Event.js deleted file mode 100644 index 4777a6d..0000000 --- a/client/Event.js +++ /dev/null @@ -1,491 +0,0 @@ -"use strict"; -var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { - function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -}; -var __generator = (this && this.__generator) || function (thisArg, body) { - var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype); - return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; - function verb(n) { return function (v) { return step([n, v]); }; } - function step(op) { - if (f) throw new TypeError("Generator is already executing."); - while (g && (g = 0, op[0] && (_ = 0)), _) try { - if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; - if (y = 0, t) op = [op[0] & 2, t.value]; - switch (op[0]) { - case 0: case 1: t = op; break; - case 4: _.label++; return { value: op[1], done: false }; - case 5: _.label++; y = op[1]; op = [0]; continue; - case 7: op = _.ops.pop(); _.trys.pop(); continue; - default: - if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } - if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } - if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } - if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } - if (t[2]) _.ops.pop(); - _.trys.pop(); continue; - } - op = body.call(thisArg, _); - } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } - if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; - } -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.client = exports.event = void 0; -var client = require("prom-client"); -exports.client = client; -var kafkajs_1 = require("kafkajs"); -var socket_io_client_1 = require("socket.io-client"); -var socket = null; -var kafka = null; -var kafkaGroupId = null; -var sharedConsumer = null; -var subscribedTopics = new Set(); -var backlogMonitoringInterval = null; -var isConsumerRunning = false; -var callbacks = {}; -var eventPublishCounter = new client.Counter({ - name: "events_published_total", - help: "Total number of events published", - labelNames: ["event_type"], -}); -var eventSubscriptionGauge = new client.Gauge({ - name: "active_event_subscriptions", - help: "Number of active event subscriptions", - labelNames: ["event_type"], -}); -var eventPublishDuration = new client.Histogram({ - name: "event_publish_duration_seconds", - help: "Time taken to publish events", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], -}); -// Track payload size for analysis -var eventPayloadSize = new client.Histogram({ - name: "event_payload_size_bytes", - help: "Size of event payloads in bytes", - labelNames: ["event_type"], - buckets: [10, 100, 1000, 10000, 100000, 1000000], -}); -// Track error rates -var eventPublishErrors = new client.Counter({ - name: "event_publish_errors_total", - help: "Total number of event publish errors", - labelNames: ["event_type", "error_type"], -}); -// Track callback processing duration -var callbackProcessingDuration = new client.Histogram({ - name: "event_callback_duration_seconds", - help: "Time taken to process event callbacks", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 
0.5, 1, 5], -}); -// Track subscription rates -var subscriptionRate = new client.Counter({ - name: "event_subscriptions_total", - help: "Total number of event subscriptions created", - labelNames: ["event_type"], -}); -// Track unsubscription rates -var unsubscriptionRate = new client.Counter({ - name: "event_unsubscriptions_total", - help: "Total number of event unsubscriptions", - labelNames: ["event_type"], -}); -// Track throughput (events processed per second) -var eventThroughput = new client.Counter({ - name: "event_callbacks_processed_total", - help: "Total number of event callbacks processed successfully", - labelNames: ["event_type"], -}); -var kafkaBacklogSize = new client.Gauge({ - name: "kafka_backlog_events_total", - help: "Total number of events waiting to be processed", - labelNames: ["topic"], -}); -// Function to update Kafka backlog metrics -var updateKafkaBacklogMetrics = function () { return __awaiter(void 0, void 0, void 0, function () { - var admin, _loop_1, _i, _a, topic; - return __generator(this, function (_b) { - switch (_b.label) { - case 0: - if (!kafka || !kafkaGroupId || subscribedTopics.size === 0) - return [2 /*return*/]; - admin = kafka.admin(); - return [4 /*yield*/, admin.connect()]; - case 1: - _b.sent(); - _loop_1 = function (topic) { - var offsetsResponse, topicOffsets, totalLag, topicResponse; - return __generator(this, function (_c) { - switch (_c.label) { - case 0: return [4 /*yield*/, admin.fetchOffsets({ - groupId: kafkaGroupId, - topics: [topic], - })]; - case 1: - offsetsResponse = _c.sent(); - return [4 /*yield*/, admin.fetchTopicOffsets(topic)]; - case 2: - topicOffsets = _c.sent(); - totalLag = 0; - topicResponse = offsetsResponse.find(function (response) { return response.topic === topic; }); - if (topicResponse) { - // Calculate lag for each partition - topicResponse.partitions.forEach(function (partitionOffset) { - var latestOffset = topicOffsets.find(function (to) { return to.partition === partitionOffset.partition; }); - if (latestOffset) { - var consumerOffset = parseInt(partitionOffset.offset); - var latestOffsetValue = parseInt(latestOffset.offset); - var lag = Math.max(0, latestOffsetValue - consumerOffset); - totalLag += lag; - } - }); - } - kafkaBacklogSize.labels(topic).set(totalLag); - console.log("Backlog for topic ".concat(topic, ": ").concat(totalLag, " messages")); - return [2 /*return*/]; - } - }); - }; - _i = 0, _a = Array.from(subscribedTopics); - _b.label = 2; - case 2: - if (!(_i < _a.length)) return [3 /*break*/, 5]; - topic = _a[_i]; - return [5 /*yield**/, _loop_1(topic)]; - case 3: - _b.sent(); - _b.label = 4; - case 4: - _i++; - return [3 /*break*/, 2]; - case 5: return [4 /*yield*/, admin.disconnect()]; - case 6: - _b.sent(); - return [2 /*return*/]; - } - }); -}); }; -var event = { - init: function (options) { - switch (options.type) { - case "inMemory": - if (!options.host) { - throw new Error("host is required for inMemory initialization"); - } - if (!options.protocol) { - throw new Error("protocol is required for inMemory initialization"); - } - var host = options.host, protocol = options.protocol; - var socketPath = (options === null || options === void 0 ? void 0 : options.port) - ? 
"".concat(protocol, "://").concat(host, ":").concat(options.port) - : "".concat(protocol, "://").concat(host); - socket = (0, socket_io_client_1.io)(socketPath); - socket.on("event", function (_a) { - var type = _a.type, payload = _a.payload; - if (callbacks[type]) { - callbacks[type].forEach(function (cb) { return cb(payload); }); - } - }); - break; - case "kafka": - if (!options.clientId) { - throw new Error("clientId is required for Kafka initialization"); - } - if (!options.brokers || - !Array.isArray(options.brokers) || - options.brokers.length === 0) { - throw new Error("brokers array is required for Kafka initialization"); - } - if (!options.groupId) { - throw new Error("groupId is required for Kafka initialization"); - } - kafka = new kafkajs_1.Kafka({ - clientId: options.clientId, - brokers: options.brokers, - }); - kafkaGroupId = options.groupId; - // Start backlog monitoring after Kafka initialization - event.startBacklogMonitoring(); - break; - } - }, - // Start backlog monitoring - startBacklogMonitoring: function (intervalMs) { - if (intervalMs === void 0) { intervalMs = 30000; } - if (kafka && !backlogMonitoringInterval) { - console.log("Starting Kafka backlog monitoring..."); - // Run once immediately - updateKafkaBacklogMetrics(); - // Set up periodic monitoring - backlogMonitoringInterval = setInterval(function () { - updateKafkaBacklogMetrics(); - }, intervalMs); - } - }, - // Stop backlog monitoring - stopBacklogMonitoring: function () { - if (backlogMonitoringInterval) { - clearInterval(backlogMonitoringInterval); - backlogMonitoringInterval = null; - console.log("Stopped Kafka backlog monitoring"); - } - }, - // Manual backlog check - checkBacklog: function () { - return __awaiter(this, void 0, void 0, function () { - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - if (!kafka) return [3 /*break*/, 2]; - return [4 /*yield*/, updateKafkaBacklogMetrics()]; - case 1: - _a.sent(); - _a.label = 2; - case 2: return [2 /*return*/]; - } - }); - }); - }, - publish: function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return __awaiter(this, void 0, void 0, function () { - var payload, types, _loop_2, _a, types_1, type; - return __generator(this, function (_b) { - switch (_b.label) { - case 0: - if (args.length < 2) { - throw new Error("publish requires at least one event type and a payload"); - } - payload = args[args.length - 1]; - types = args.slice(0, -1); - _loop_2 = function (type) { - var endTimer, payloadSize, producer; - return __generator(this, function (_c) { - switch (_c.label) { - case 0: - console.log("node-event", "publish", type, payload); - if (type === "__proto__" || - type === "constructor" || - type === "prototype") { - throw new Error("Invalid publish type"); - } - endTimer = eventPublishDuration.labels(type).startTimer(); - eventPublishCounter.labels(type).inc(); - payloadSize = JSON.stringify(payload).length; - eventPayloadSize.labels(type).observe(payloadSize); - if (!socket) return [3 /*break*/, 1]; - socket.emit("publish", { type: type, payload: payload }); - return [3 /*break*/, 5]; - case 1: - if (!kafka) return [3 /*break*/, 5]; - producer = kafka.producer(); - return [4 /*yield*/, producer.connect()]; - case 2: - _c.sent(); - return [4 /*yield*/, producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - })]; - case 3: - _c.sent(); - return [4 /*yield*/, producer.disconnect()]; - case 4: - _c.sent(); - setTimeout(function () { return 
updateKafkaBacklogMetrics(); }, 500); - _c.label = 5; - case 5: - if (callbacks[type]) { - callbacks[type].forEach(function (callback) { - setTimeout(function () { - var callbackTimer = callbackProcessingDuration - .labels(type) - .startTimer(); - callback(payload); - eventThroughput.labels(type).inc(); - callbackTimer(); - }, 0); - }); - } - endTimer(); - return [2 /*return*/]; - } - }); - }; - _a = 0, types_1 = types; - _b.label = 1; - case 1: - if (!(_a < types_1.length)) return [3 /*break*/, 4]; - type = types_1[_a]; - return [5 /*yield**/, _loop_2(type)]; - case 2: - _b.sent(); - _b.label = 3; - case 3: - _a++; - return [3 /*break*/, 1]; - case 4: return [2 /*return*/]; - } - }); - }); - }, - subscribe: function (type, callback) { - return __awaiter(this, void 0, void 0, function () { - var wasNewTopic; - var _this = this; - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - if (!callbacks[type]) - callbacks[type] = new Set(); - callbacks[type].add(callback); - subscriptionRate.labels(type).inc(); - eventSubscriptionGauge.labels(type).set(callbacks[type].size); - if (!socket) return [3 /*break*/, 1]; - socket.emit("subscribe", type); - return [3 /*break*/, 3]; - case 1: - if (!kafka) return [3 /*break*/, 3]; - wasNewTopic = !subscribedTopics.has(type); - if (!wasNewTopic) return [3 /*break*/, 3]; - subscribedTopics.add(type); - return [4 /*yield*/, this.restartKafkaConsumer()]; - case 2: - _a.sent(); - setTimeout(function () { - updateKafkaBacklogMetrics(); - }, 1000); - _a.label = 3; - case 3: return [2 /*return*/, function () { return __awaiter(_this, void 0, void 0, function () { - return __generator(this, function (_a) { - callbacks[type].delete(callback); - unsubscriptionRate.labels(type).inc(); - if (callbacks[type].size === 0) { - delete callbacks[type]; - eventSubscriptionGauge.labels(type).set(0); - if (socket) { - socket.emit("unsubscribe", type); - } - } - else { - eventSubscriptionGauge.labels(type).set(callbacks[type].size); - } - return [2 /*return*/]; - }); - }); }]; - } - }); - }); - }, - restartKafkaConsumer: function () { - return __awaiter(this, void 0, void 0, function () { - var _this = this; - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - if (!kafka || subscribedTopics.size === 0) - return [2 /*return*/]; - if (!(sharedConsumer && isConsumerRunning)) return [3 /*break*/, 3]; - console.log("Stopping existing Kafka consumer..."); - return [4 /*yield*/, sharedConsumer.stop()]; - case 1: - _a.sent(); - return [4 /*yield*/, sharedConsumer.disconnect()]; - case 2: - _a.sent(); - sharedConsumer = null; - isConsumerRunning = false; - _a.label = 3; - case 3: - console.log("Starting Kafka consumer with topics: ".concat(Array.from(subscribedTopics).join(", "))); - sharedConsumer = kafka.consumer({ groupId: kafkaGroupId }); - return [4 /*yield*/, sharedConsumer.connect()]; - case 4: - _a.sent(); - return [4 /*yield*/, sharedConsumer.subscribe({ - topics: Array.from(subscribedTopics), - fromBeginning: false, - })]; - case 5: - _a.sent(); - return [4 /*yield*/, sharedConsumer.run({ - partitionsConsumedConcurrently: 1, - eachMessage: function (_a) { return __awaiter(_this, [_a], void 0, function (_b) { - var payload_1, callbackTimer; - var _c; - var topic = _b.topic, partition = _b.partition, message = _b.message; - return __generator(this, function (_d) { - if (callbacks[topic]) { - try { - payload_1 = JSON.parse(((_c = message.value) === null || _c === void 0 ? 
void 0 : _c.toString()) || "{}"); - callbackTimer = callbackProcessingDuration - .labels(topic) - .startTimer(); - callbacks[topic].forEach(function (cb) { - cb(payload_1); - eventThroughput.labels(topic).inc(); - }); - callbackTimer(); - } - catch (error) { - console.error("Error processing message for topic ".concat(topic, ":"), error); - eventPublishErrors.labels(topic, "processing_error").inc(); - } - } - return [2 /*return*/]; - }); - }); }, - })]; - case 6: - _a.sent(); - isConsumerRunning = true; - return [2 /*return*/]; - } - }); - }); - }, - disconnect: function () { - return __awaiter(this, void 0, void 0, function () { - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - event.stopBacklogMonitoring(); - if (!socket) return [3 /*break*/, 1]; - socket.disconnect(); - socket = null; - return [3 /*break*/, 5]; - case 1: - if (!kafka) return [3 /*break*/, 5]; - if (!(sharedConsumer && isConsumerRunning)) return [3 /*break*/, 4]; - return [4 /*yield*/, sharedConsumer.stop()]; - case 2: - _a.sent(); - return [4 /*yield*/, sharedConsumer.disconnect()]; - case 3: - _a.sent(); - sharedConsumer = null; - isConsumerRunning = false; - _a.label = 4; - case 4: - subscribedTopics.clear(); - kafka = null; - kafkaGroupId = null; - _a.label = 5; - case 5: - Object.keys(callbacks).forEach(function (key) { return delete callbacks[key]; }); - return [2 /*return*/]; - } - }); - }); - }, -}; -exports.event = event; diff --git a/client/Event.ts b/client/Event.ts deleted file mode 100644 index b434234..0000000 --- a/client/Event.ts +++ /dev/null @@ -1,417 +0,0 @@ -import * as client from "prom-client"; - -import { Consumer, Kafka } from "kafkajs"; -import { Socket, io } from "socket.io-client"; - -let socket: Socket | null = null; -let kafka: Kafka | null = null; -let kafkaGroupId: string | null = null; -let sharedConsumer: Consumer | null = null; -let subscribedTopics: Set = new Set(); -let backlogMonitoringInterval: NodeJS.Timeout | null = null; -let isConsumerRunning = false; - -const callbacks: Record> = {}; - -interface BaseInitOptions { - type: "inMemory" | "socket" | "kafka"; -} - -interface InMemoryOptions extends BaseInitOptions { - type: "inMemory"; - host: string; - port?: number; - protocol: string; -} - -interface KafkaOptions extends BaseInitOptions { - type: "kafka"; - clientId: string; - brokers: string[]; - groupId: string; -} - -type InitOptions = InMemoryOptions | KafkaOptions; - -type Callback = (payload: T) => void; - -const eventPublishCounter = new client.Counter({ - name: "events_published_total", - help: "Total number of events published", - labelNames: ["event_type"], -}); - -const eventSubscriptionGauge = new client.Gauge({ - name: "active_event_subscriptions", - help: "Number of active event subscriptions", - labelNames: ["event_type"], -}); - -const eventPublishDuration = new client.Histogram({ - name: "event_publish_duration_seconds", - help: "Time taken to publish events", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], -}); - -// Track payload size for analysis -const eventPayloadSize = new client.Histogram({ - name: "event_payload_size_bytes", - help: "Size of event payloads in bytes", - labelNames: ["event_type"], - buckets: [10, 100, 1000, 10000, 100000, 1000000], -}); - -// Track error rates -const eventPublishErrors = new client.Counter({ - name: "event_publish_errors_total", - help: "Total number of event publish errors", - labelNames: ["event_type", "error_type"], -}); - -// Track callback processing 
duration -const callbackProcessingDuration = new client.Histogram({ - name: "event_callback_duration_seconds", - help: "Time taken to process event callbacks", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], -}); - -// Track subscription rates -const subscriptionRate = new client.Counter({ - name: "event_subscriptions_total", - help: "Total number of event subscriptions created", - labelNames: ["event_type"], -}); - -// Track unsubscription rates -const unsubscriptionRate = new client.Counter({ - name: "event_unsubscriptions_total", - help: "Total number of event unsubscriptions", - labelNames: ["event_type"], -}); - -// Track throughput (events processed per second) -const eventThroughput = new client.Counter({ - name: "event_callbacks_processed_total", - help: "Total number of event callbacks processed successfully", - labelNames: ["event_type"], -}); - -const kafkaBacklogSize = new client.Gauge({ - name: "kafka_backlog_events_total", - help: "Total number of events waiting to be processed", - labelNames: ["topic"], -}); - -// Function to update Kafka backlog metrics -const updateKafkaBacklogMetrics = async () => { - if (!kafka || !kafkaGroupId || subscribedTopics.size === 0) return; - - const admin = kafka.admin(); - await admin.connect(); - - for (const topic of Array.from(subscribedTopics)) { - // Get consumer group offsets - const offsetsResponse = await admin.fetchOffsets({ - groupId: kafkaGroupId, - topics: [topic], - }); - - // Get latest offsets for the topic - const topicOffsets = await admin.fetchTopicOffsets(topic); - - let totalLag = 0; - - // The response structure is: [{ topic: string, partitions: FetchOffsetsPartition[] }] - const topicResponse = offsetsResponse.find( - (response) => response.topic === topic - ); - - if (topicResponse) { - // Calculate lag for each partition - topicResponse.partitions.forEach((partitionOffset) => { - const latestOffset = topicOffsets.find( - (to) => to.partition === partitionOffset.partition - ); - - if (latestOffset) { - const consumerOffset = parseInt(partitionOffset.offset); - const latestOffsetValue = parseInt(latestOffset.offset); - const lag = Math.max(0, latestOffsetValue - consumerOffset); - totalLag += lag; - } - }); - } - - kafkaBacklogSize.labels(topic).set(totalLag); - console.log(`Backlog for topic ${topic}: ${totalLag} messages`); - } - - await admin.disconnect(); -}; - -const event = { - init(options: InitOptions) { - switch (options.type) { - case "inMemory": - if (!options.host) { - throw new Error("host is required for inMemory initialization"); - } - if (!options.protocol) { - throw new Error("protocol is required for inMemory initialization"); - } - - const { host, protocol } = options; - - const socketPath = options?.port - ? 
`${protocol}://${host}:${options.port}` - : `${protocol}://${host}`; - - socket = io(socketPath); - socket.on( - "event", - ({ type, payload }: { type: string; payload: any }) => { - if (callbacks[type]) { - callbacks[type].forEach((cb) => cb(payload)); - } - } - ); - break; - case "kafka": - if (!options.clientId) { - throw new Error("clientId is required for Kafka initialization"); - } - if ( - !options.brokers || - !Array.isArray(options.brokers) || - options.brokers.length === 0 - ) { - throw new Error("brokers array is required for Kafka initialization"); - } - if (!options.groupId) { - throw new Error("groupId is required for Kafka initialization"); - } - - kafka = new Kafka({ - clientId: options.clientId, - brokers: options.brokers, - }); - kafkaGroupId = options.groupId; - - // Start backlog monitoring after Kafka initialization - event.startBacklogMonitoring(); - break; - } - }, - - // Start backlog monitoring - startBacklogMonitoring(intervalMs: number = 30000) { - if (kafka && !backlogMonitoringInterval) { - console.log("Starting Kafka backlog monitoring..."); - - // Run once immediately - updateKafkaBacklogMetrics(); - - // Set up periodic monitoring - backlogMonitoringInterval = setInterval(() => { - updateKafkaBacklogMetrics(); - }, intervalMs); - } - }, - - // Stop backlog monitoring - stopBacklogMonitoring() { - if (backlogMonitoringInterval) { - clearInterval(backlogMonitoringInterval); - backlogMonitoringInterval = null; - console.log("Stopped Kafka backlog monitoring"); - } - }, - - // Manual backlog check - async checkBacklog(): Promise { - if (kafka) { - await updateKafkaBacklogMetrics(); - } - }, - - async publish(...args: [...string[], T]): Promise { - if (args.length < 2) { - throw new Error("publish requires at least one event type and a payload"); - } - - const payload = args[args.length - 1]; - const types = args.slice(0, -1) as string[]; - - for (const type of types) { - console.log("node-event", "publish", type, payload); - - if ( - type === "__proto__" || - type === "constructor" || - type === "prototype" - ) { - throw new Error("Invalid publish type"); - } - - const endTimer = eventPublishDuration.labels(type).startTimer(); - eventPublishCounter.labels(type).inc(); - - const payloadSize = JSON.stringify(payload).length; - eventPayloadSize.labels(type).observe(payloadSize); - - if (socket) { - socket.emit("publish", { type, payload }); - } else if (kafka) { - const producer = kafka.producer(); - await producer.connect(); - - await producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - }); - - await producer.disconnect(); - - setTimeout(() => updateKafkaBacklogMetrics(), 500); - } - - if (callbacks[type]) { - callbacks[type].forEach((callback) => { - setTimeout(() => { - const callbackTimer = callbackProcessingDuration - .labels(type) - .startTimer(); - - callback(payload); - eventThroughput.labels(type).inc(); - callbackTimer(); - }, 0); - }); - } - - endTimer(); - } - }, - - async subscribe( - type: string, - callback: Callback - ): Promise<() => void> { - if (!callbacks[type]) callbacks[type] = new Set(); - - callbacks[type].add(callback as Callback); - - subscriptionRate.labels(type).inc(); - eventSubscriptionGauge.labels(type).set(callbacks[type].size); - - if (socket) { - socket.emit("subscribe", type); - } else if (kafka) { - const wasNewTopic = !subscribedTopics.has(type); - if (wasNewTopic) { - subscribedTopics.add(type); - await this.restartKafkaConsumer(); - - setTimeout(() => { - updateKafkaBacklogMetrics(); - }, 1000); - 
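For orientation, a minimal end-to-end sketch of the facade this patch converges on (the client/index entry point introduced later in the same patch). The broker address, topic name, and payload shape are illustrative, and awaiting client.register.metrics() assumes prom-client v13 or later:

import { client, event } from "node-event-test-package/client";

async function main() {
  await event.init({
    type: "kafka",
    clientId: "orders-service",   // illustrative identifiers
    brokers: ["localhost:9092"],
    groupId: "orders-group",
  });

  // subscribe() resolves to an unsubscriber instead of exposing a separate API
  const unsubscribe = await event.subscribe(
    "order.created",
    (payload: { orderId: string }) => console.log("received", payload.orderId)
  );

  await event.publish("order.created", { orderId: "o-123" });

  await unsubscribe();
  await event.disconnect();

  console.log(await client.register.metrics()); // Prometheus exposition text
}

main().catch(console.error);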
} - } - - return async () => { - callbacks[type].delete(callback as Callback); - - unsubscriptionRate.labels(type).inc(); - - if (callbacks[type].size === 0) { - delete callbacks[type]; - eventSubscriptionGauge.labels(type).set(0); - if (socket) { - socket.emit("unsubscribe", type); - } - } else { - eventSubscriptionGauge.labels(type).set(callbacks[type].size); - } - }; - }, - - async restartKafkaConsumer() { - if (!kafka || subscribedTopics.size === 0) return; - - if (sharedConsumer && isConsumerRunning) { - console.log("Stopping existing Kafka consumer..."); - await sharedConsumer.stop(); - await sharedConsumer.disconnect(); - sharedConsumer = null; - isConsumerRunning = false; - } - - console.log( - `Starting Kafka consumer with topics: ${Array.from(subscribedTopics).join( - ", " - )}` - ); - sharedConsumer = kafka.consumer({ groupId: kafkaGroupId! }); - await sharedConsumer.connect(); - - await sharedConsumer.subscribe({ - topics: Array.from(subscribedTopics), - fromBeginning: false, - }); - - await sharedConsumer.run({ - partitionsConsumedConcurrently: 1, - eachMessage: async ({ topic, partition, message }) => { - if (callbacks[topic]) { - try { - const payload = JSON.parse(message.value?.toString() || "{}"); - const callbackTimer = callbackProcessingDuration - .labels(topic) - .startTimer(); - - callbacks[topic].forEach((cb) => { - cb(payload); - eventThroughput.labels(topic).inc(); - }); - - callbackTimer(); - } catch (error) { - console.error( - `Error processing message for topic ${topic}:`, - error - ); - eventPublishErrors.labels(topic, "processing_error").inc(); - } - } - }, - }); - - isConsumerRunning = true; - }, - - async disconnect() { - event.stopBacklogMonitoring(); - - if (socket) { - socket.disconnect(); - socket = null; - } else if (kafka) { - if (sharedConsumer && isConsumerRunning) { - await sharedConsumer.stop(); - await sharedConsumer.disconnect(); - sharedConsumer = null; - isConsumerRunning = false; - } - - subscribedTopics.clear(); - kafka = null; - kafkaGroupId = null; - } - - Object.keys(callbacks).forEach((key) => delete callbacks[key]); - }, -}; - -export { event, client }; diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts new file mode 100644 index 0000000..1222b24 --- /dev/null +++ b/client/adapters/KafkaAdapter.ts @@ -0,0 +1,163 @@ +import { Consumer, Kafka, Producer } from "kafkajs"; + +import { EventAdapter } from "../types/types"; + +export class KafkaAdapter implements EventAdapter { + private kafka: Kafka; + private consumer: Consumer | null = null; + private producer: Producer | null = null; + private messageHandler?: (type: string, payload: any) => void; + private subscribedTopics = new Set(); + private isRunning = false; + + constructor( + private readonly options: { + clientId: string; + brokers: string[]; + groupId: string; + } + ) { + this.kafka = new Kafka({ + clientId: options.clientId, + brokers: options.brokers, + }); + } + + async connect(): Promise { + this.producer = this.kafka.producer(); + await this.producer.connect(); + } + + async disconnect(): Promise { + if (this.consumer && this.isRunning) { + await this.consumer.stop(); + await this.consumer.disconnect(); + this.consumer = null; + this.isRunning = false; + } + + this.subscribedTopics.clear(); + } + + async publish(type: string, payload: T): Promise { + const producer = this.kafka.producer(); + await producer.connect(); + + try { + await producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); + } finally { + await 
producer.disconnect(); + } + } + + async subscribe(type: string): Promise { + if (!this.subscribedTopics.has(type)) { + this.subscribedTopics.add(type); + await this.restartConsumer(); + } + } + + async unsubscribe(type: string): Promise { + this.subscribedTopics.delete(type); + if (this.subscribedTopics.size > 0) { + await this.restartConsumer(); + } else if (this.consumer) { + await this.consumer.stop(); + await this.consumer.disconnect(); + this.consumer = null; + this.isRunning = false; + } + } + + onMessage(handler: (type: string, payload: any) => void): void { + this.messageHandler = handler; + } + + private async restartConsumer(): Promise { + if (this.subscribedTopics.size === 0) return; + + if (this.consumer && this.isRunning) { + console.log("Stopping existing Kafka consumer..."); + await this.consumer.stop(); + await this.consumer.disconnect(); + this.isRunning = false; + } + + console.log( + `Starting Kafka consumer with topics: ${Array.from( + this.subscribedTopics + ).join(", ")}` + ); + this.consumer = this.kafka.consumer({ groupId: this.options.groupId }); + await this.consumer.connect(); + + await this.consumer.subscribe({ + topics: Array.from(this.subscribedTopics), + fromBeginning: false, + }); + + await this.consumer.run({ + partitionsConsumedConcurrently: 1, + eachMessage: async ({ topic, message }) => { + if (this.messageHandler) { + try { + const payload = JSON.parse(message.value?.toString() || "{}"); + this.messageHandler(topic, payload); + } catch (error) { + console.error( + `Error processing message for topic ${topic}:`, + error + ); + } + } + }, + }); + + this.isRunning = true; + } + + async getBacklog(): Promise> { + const backlogMap = new Map(); + if (this.subscribedTopics.size === 0) return backlogMap; + + const admin = this.kafka.admin(); + await admin.connect(); + + try { + for (const topic of this.subscribedTopics) { + const offsetsResponse = await admin.fetchOffsets({ + groupId: this.options.groupId, + topics: [topic], + }); + + const topicOffsets = await admin.fetchTopicOffsets(topic); + let totalLag = 0; + + const topicResponse = offsetsResponse.find(r => r.topic === topic); + if (topicResponse) { + topicResponse.partitions.forEach((partitionOffset) => { + const latestOffset = topicOffsets.find( + to => to.partition === partitionOffset.partition + ); + + if (latestOffset) { + const consumerOffset = parseInt(partitionOffset.offset); + const latestOffsetValue = parseInt(latestOffset.offset); + totalLag += Math.max(0, latestOffsetValue - consumerOffset); + } + }); + } + + backlogMap.set(topic, totalLag); + } + } finally { + await admin.disconnect(); + } + + return backlogMap; + } +} + diff --git a/client/adapters/SocketAdapter.ts b/client/adapters/SocketAdapter.ts new file mode 100644 index 0000000..02ddff3 --- /dev/null +++ b/client/adapters/SocketAdapter.ts @@ -0,0 +1,59 @@ +import { Socket, io } from "socket.io-client"; + +import { EventAdapter } from "../types/types"; + +export class SocketAdapter implements EventAdapter { + private socket: Socket | null = null; + private messageHandler?: (type: string, payload: any) => void; + + constructor(private readonly options: { + host: string; + port?: number; + protocol: string; + }) {} + + async connect(): Promise { + const { host, port, protocol } = this.options; + const socketPath = port ? 
`${protocol}://${host}:${port}` : `${protocol}://${host}`; + + this.socket = io(socketPath); + + this.socket.on("event", ({ type, payload }: { type: string; payload: any }) => { + if (this.messageHandler) { + this.messageHandler(type, payload); + } + }); + } + + async disconnect(): Promise { + if (this.socket) { + this.socket.disconnect(); + this.socket = null; + } + } + + async publish(type: string, payload: T): Promise { + if (!this.socket) { + throw new Error("Socket not connected"); + } + this.socket.emit("publish", { type, payload }); + } + + async subscribe(type: string): Promise { + if (!this.socket) { + throw new Error("Socket not connected"); + } + this.socket.emit("subscribe", type); + } + + async unsubscribe(type: string): Promise { + if (!this.socket) { + throw new Error("Socket not connected"); + } + this.socket.emit("unsubscribe", type); + } + + onMessage(handler: (type: string, payload: any) => void): void { + this.messageHandler = handler; + } +} \ No newline at end of file diff --git a/client/eventManager.ts b/client/eventManager.ts new file mode 100644 index 0000000..282fb54 --- /dev/null +++ b/client/eventManager.ts @@ -0,0 +1,179 @@ +import { Callback, EventAdapter, InitOptions } from "./types/types"; + +import { EventMetrics } from "./metrics"; +import { KafkaAdapter } from "./adapters/KafkaAdapter"; +import { SocketAdapter } from "./adapters/SocketAdapter"; + +export class EventManager { + private adapter: EventAdapter | null = null; + private callbacks: Map> = new Map(); + private metrics = new EventMetrics(); + private backlogInterval: NodeJS.Timeout | null = null; + + async init(options: InitOptions): Promise { + if (this.adapter) { + await this.disconnect(); + } + + switch (options.type) { + case "inMemory": + this.adapter = new SocketAdapter({ + host: options.host, + port: options.port, + protocol: options.protocol, + }); + break; + + case "kafka": + this.adapter = new KafkaAdapter({ + clientId: options.clientId, + brokers: options.brokers, + groupId: options.groupId, + }); + this.startBacklogMonitoring(); + break; + + default: + throw new Error(`Unknown adapter type: ${(options as any).type}`); + } + + await this.adapter.connect(); + + this.adapter.onMessage((type, payload) => { + this.handleIncomingMessage(type, payload); + }); + } + + async publish(...args: [...string[], T]): Promise { + if (args.length < 2) { + throw new Error("publish requires at least one event type and a payload"); + } + + if (!this.adapter) { + throw new Error("Event system not initialized"); + } + + const payload = args[args.length - 1]; + const types = args.slice(0, -1) as string[]; + + for (const type of types) { + this.validateEventType(type); + + const payloadSize = JSON.stringify(payload).length; + const endTimer = this.metrics.recordPublish(type, payloadSize); + + try { + await this.adapter.publish(type, payload); + + this.executeCallbacks(type, payload); + + endTimer(); + } catch (error) { + this.metrics.recordPublishError(type, "publish_error"); + endTimer(); + throw error; + } + } + } + + async subscribe(type: string, callback: Callback): Promise<() => void> { + if (!this.callbacks.has(type)) { + this.callbacks.set(type, new Set()); + } + + const callbackSet = this.callbacks.get(type)!; + callbackSet.add(callback); + + this.metrics.updateSubscriptions(type, callbackSet.size); + + if (this.adapter && callbackSet.size === 1) { + await this.adapter.subscribe(type); + } + + return async () => { + callbackSet.delete(callback); + + if (callbackSet.size === 0) { + 
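The branch opened here drops a type once its last callback is removed, which gives EventManager a first/last-subscriber contract: the adapter is subscribed only when a type gains its first callback and unsubscribed only after the last one goes away. A small sketch under that assumption (names illustrative):

import { EventManager } from "node-event-test-package/client";

async function demo() {
  const manager = new EventManager();
  await manager.init({ type: "inMemory", host: "localhost", port: 3000, protocol: "http" });

  const offA = await manager.subscribe("metrics.tick", (p: unknown) => console.log("A", p));
  const offB = await manager.subscribe("metrics.tick", (p: unknown) => console.log("B", p)); // adapter.subscribe ran only once

  await offA(); // one callback remains, so the adapter stays subscribed
  await offB(); // last callback removed: the adapter unsubscribes from "metrics.tick"
}

demo().catch(console.error);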
this.callbacks.delete(type); + if (this.adapter) { + await this.adapter.unsubscribe(type); + } + } + + this.metrics.updateSubscriptions(type, callbackSet.size); + }; + } + + async disconnect(): Promise { + this.stopBacklogMonitoring(); + + if (this.adapter) { + await this.adapter.disconnect(); + this.adapter = null; + } + + this.callbacks.clear(); + } + + private handleIncomingMessage(type: string, payload: any): void { + this.executeCallbacks(type, payload); + } + + private executeCallbacks(type: string, payload: any): void { + const callbackSet = this.callbacks.get(type); + if (!callbackSet) return; + + callbackSet.forEach(callback => { + setTimeout(() => { + const endTimer = this.metrics.recordCallback(type); + try { + callback(payload); + } catch (error) { + console.error(`Error in callback for ${type}:`, error); + } + endTimer(); + }, 0); + }); + } + + private validateEventType(type: string): void { + if (type === "__proto__" || type === "constructor" || type === "prototype") { + throw new Error("Invalid event type"); + } + } + + private startBacklogMonitoring(intervalMs: number = 30000): void { + if (!(this.adapter instanceof KafkaAdapter)) return; + + this.updateBacklogMetrics(); + + this.backlogInterval = setInterval(() => { + this.updateBacklogMetrics(); + }, intervalMs); + } + + private stopBacklogMonitoring(): void { + if (this.backlogInterval) { + clearInterval(this.backlogInterval); + this.backlogInterval = null; + } + } + + private async updateBacklogMetrics(): Promise { + if (!(this.adapter instanceof KafkaAdapter)) return; + + try { + const backlog = await this.adapter.getBacklog(); + backlog.forEach((size, topic) => { + this.metrics.updateKafkaBacklog(topic, size); + console.log(`Backlog for topic ${topic}: ${size} messages`); + }); + } catch (error) { + console.error("Error updating backlog metrics:", error); + } + } + + async checkBacklog(): Promise { + await this.updateBacklogMetrics(); + } +} \ No newline at end of file diff --git a/client/index.ts b/client/index.ts new file mode 100644 index 0000000..377abda --- /dev/null +++ b/client/index.ts @@ -0,0 +1,31 @@ +import * as client from "prom-client"; + +import { EventManager } from "./eventManager"; +import { EventMetrics } from "./metrics"; +import { KafkaAdapter } from "./adapters/KafkaAdapter"; +import { SocketAdapter } from "./adapters/SocketAdapter"; + +const manager = new EventManager(); + +export const event = { + init: (options: any) => manager.init(options), + publish: (...args: [...string[], T]) => manager.publish(...args), + subscribe: (type: string, callback: any) => manager.subscribe(type, callback), + disconnect: () => manager.disconnect(), + checkBacklog: () => manager.checkBacklog(), + + startBacklogMonitoring: () => { + console.log("Backlog monitoring starts automatically with Kafka adapter"); + }, + stopBacklogMonitoring: () => { + console.log("Backlog monitoring stops automatically on disconnect"); + }, + restartKafkaConsumer: async () => { + console.log("Consumer restart is handled automatically"); + }, +}; + +export { client }; + +export { EventManager, EventMetrics, SocketAdapter, KafkaAdapter }; +export * from "./types/types"; \ No newline at end of file diff --git a/client/metrics.ts b/client/metrics.ts new file mode 100644 index 0000000..96d5c0b --- /dev/null +++ b/client/metrics.ts @@ -0,0 +1,77 @@ +import * as client from "prom-client"; + +export class EventMetrics { + private readonly publishCounter = new client.Counter({ + name: "events_published_total", + help: "Total number of events 
published", + labelNames: ["event_type"], + }); + + private readonly subscriptionGauge = new client.Gauge({ + name: "active_event_subscriptions", + help: "Number of active event subscriptions", + labelNames: ["event_type"], + }); + + private readonly publishDuration = new client.Histogram({ + name: "event_publish_duration_seconds", + help: "Time taken to publish events", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + }); + + private readonly payloadSize = new client.Histogram({ + name: "event_payload_size_bytes", + help: "Size of event payloads in bytes", + labelNames: ["event_type"], + buckets: [10, 100, 1000, 10000, 100000, 1000000], + }); + + private readonly publishErrors = new client.Counter({ + name: "event_publish_errors_total", + help: "Total number of event publish errors", + labelNames: ["event_type", "error_type"], + }); + + private readonly callbackDuration = new client.Histogram({ + name: "event_callback_duration_seconds", + help: "Time taken to process event callbacks", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + }); + + private readonly throughput = new client.Counter({ + name: "event_callbacks_processed_total", + help: "Total number of event callbacks processed successfully", + labelNames: ["event_type"], + }); + + private readonly kafkaBacklog = new client.Gauge({ + name: "kafka_backlog_events_total", + help: "Total number of events waiting to be processed", + labelNames: ["topic"], + }); + + recordPublish(type: string, payloadSizeBytes: number): () => void { + this.publishCounter.labels(type).inc(); + this.payloadSize.labels(type).observe(payloadSizeBytes); + return this.publishDuration.labels(type).startTimer(); + } + + recordPublishError(type: string, errorType: string): void { + this.publishErrors.labels(type, errorType).inc(); + } + + recordCallback(type: string): () => void { + this.throughput.labels(type).inc(); + return this.callbackDuration.labels(type).startTimer(); + } + + updateSubscriptions(type: string, count: number): void { + this.subscriptionGauge.labels(type).set(count); + } + + updateKafkaBacklog(topic: string, size: number): void { + this.kafkaBacklog.labels(topic).set(size); + } +} \ No newline at end of file diff --git a/client/types/types.ts b/client/types/types.ts new file mode 100644 index 0000000..0aed371 --- /dev/null +++ b/client/types/types.ts @@ -0,0 +1,30 @@ +export type Callback = (payload: T) => void; + +export interface EventAdapter { + connect(): Promise; + disconnect(): Promise; + publish(type: string, payload: T): Promise; + subscribe(type: string): Promise; + unsubscribe(type: string): Promise; + onMessage(handler: (type: string, payload: any) => void): void; +} + +export interface BaseInitOptions { + type: "inMemory" | "kafka"; +} + +export interface InMemoryOptions extends BaseInitOptions { + type: "inMemory"; + host: string; + port?: number; + protocol: string; +} + +export interface KafkaOptions extends BaseInitOptions { + type: "kafka"; + clientId: string; + brokers: string[]; + groupId: string; +} + +export type InitOptions = InMemoryOptions | KafkaOptions; \ No newline at end of file diff --git a/package.json b/package.json index ed7f9e0..1efa605 100644 --- a/package.json +++ b/package.json @@ -52,9 +52,9 @@ "types": "./src/Event.d.ts" }, "./client": { - "import": "./client/Event.js", - "require": "./client/Event.js", - "types": "./client/Event.d.ts" + "import": "./client/index.js", + "require": "./client/index.js", + "types": 
"./client/index.d.ts" }, "./server": { "import": "./server/server.js", From 92b678ec04031c2d4fe8d40654721e06ecee8e99 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Sat, 27 Sep 2025 13:10:32 +0300 Subject: [PATCH 12/35] Add event system with Kafka and Socket adapters Introduces a modular event system supporting Kafka and Socket.io adapters, including event publishing, subscription management, metrics collection, and backlog monitoring. Updates build scripts and TypeScript configuration for proper compilation and type declarations. --- client/adapters/KafkaAdapter.d.ts | 23 +++++ client/adapters/KafkaAdapter.js | 133 +++++++++++++++++++++++++ client/adapters/SocketAdapter.d.ts | 17 ++++ client/adapters/SocketAdapter.js | 50 ++++++++++ client/eventManager.d.ts | 18 ++++ client/eventManager.js | 151 +++++++++++++++++++++++++++++ client/index.d.ts | 18 ++++ client/index.js | 67 +++++++++++++ client/metrics.d.ts | 15 +++ client/metrics.js | 101 +++++++++++++++++++ client/types/types.d.ts | 25 +++++ client/types/types.js | 2 + package.json | 4 +- tsconfig.json | 16 ++- 14 files changed, 636 insertions(+), 4 deletions(-) create mode 100644 client/adapters/KafkaAdapter.d.ts create mode 100644 client/adapters/KafkaAdapter.js create mode 100644 client/adapters/SocketAdapter.d.ts create mode 100644 client/adapters/SocketAdapter.js create mode 100644 client/eventManager.d.ts create mode 100644 client/eventManager.js create mode 100644 client/index.d.ts create mode 100644 client/index.js create mode 100644 client/metrics.d.ts create mode 100644 client/metrics.js create mode 100644 client/types/types.d.ts create mode 100644 client/types/types.js diff --git a/client/adapters/KafkaAdapter.d.ts b/client/adapters/KafkaAdapter.d.ts new file mode 100644 index 0000000..fe299f3 --- /dev/null +++ b/client/adapters/KafkaAdapter.d.ts @@ -0,0 +1,23 @@ +import { EventAdapter } from "../types/types"; +export declare class KafkaAdapter implements EventAdapter { + private readonly options; + private kafka; + private consumer; + private producer; + private messageHandler?; + private subscribedTopics; + private isRunning; + constructor(options: { + clientId: string; + brokers: string[]; + groupId: string; + }); + connect(): Promise; + disconnect(): Promise; + publish(type: string, payload: T): Promise; + subscribe(type: string): Promise; + unsubscribe(type: string): Promise; + onMessage(handler: (type: string, payload: any) => void): void; + private restartConsumer; + getBacklog(): Promise>; +} diff --git a/client/adapters/KafkaAdapter.js b/client/adapters/KafkaAdapter.js new file mode 100644 index 0000000..597467e --- /dev/null +++ b/client/adapters/KafkaAdapter.js @@ -0,0 +1,133 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.KafkaAdapter = void 0; +const kafkajs_1 = require("kafkajs"); +class KafkaAdapter { + options; + kafka; + consumer = null; + producer = null; + messageHandler; + subscribedTopics = new Set(); + isRunning = false; + constructor(options) { + this.options = options; + this.kafka = new kafkajs_1.Kafka({ + clientId: options.clientId, + brokers: options.brokers, + }); + } + async connect() { + this.producer = this.kafka.producer(); + await this.producer.connect(); + } + async disconnect() { + if (this.consumer && this.isRunning) { + await this.consumer.stop(); + await this.consumer.disconnect(); + this.consumer = null; + this.isRunning = false; + } + this.subscribedTopics.clear(); + } + async publish(type, payload) { + const producer = 
this.kafka.producer(); + await producer.connect(); + try { + await producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); + } + finally { + await producer.disconnect(); + } + } + async subscribe(type) { + if (!this.subscribedTopics.has(type)) { + this.subscribedTopics.add(type); + await this.restartConsumer(); + } + } + async unsubscribe(type) { + this.subscribedTopics.delete(type); + if (this.subscribedTopics.size > 0) { + await this.restartConsumer(); + } + else if (this.consumer) { + await this.consumer.stop(); + await this.consumer.disconnect(); + this.consumer = null; + this.isRunning = false; + } + } + onMessage(handler) { + this.messageHandler = handler; + } + async restartConsumer() { + if (this.subscribedTopics.size === 0) + return; + if (this.consumer && this.isRunning) { + console.log("Stopping existing Kafka consumer..."); + await this.consumer.stop(); + await this.consumer.disconnect(); + this.isRunning = false; + } + console.log(`Starting Kafka consumer with topics: ${Array.from(this.subscribedTopics).join(", ")}`); + this.consumer = this.kafka.consumer({ groupId: this.options.groupId }); + await this.consumer.connect(); + await this.consumer.subscribe({ + topics: Array.from(this.subscribedTopics), + fromBeginning: false, + }); + await this.consumer.run({ + partitionsConsumedConcurrently: 1, + eachMessage: async ({ topic, message }) => { + if (this.messageHandler) { + try { + const payload = JSON.parse(message.value?.toString() || "{}"); + this.messageHandler(topic, payload); + } + catch (error) { + console.error(`Error processing message for topic ${topic}:`, error); + } + } + }, + }); + this.isRunning = true; + } + async getBacklog() { + const backlogMap = new Map(); + if (this.subscribedTopics.size === 0) + return backlogMap; + const admin = this.kafka.admin(); + await admin.connect(); + try { + for (const topic of this.subscribedTopics) { + const offsetsResponse = await admin.fetchOffsets({ + groupId: this.options.groupId, + topics: [topic], + }); + const topicOffsets = await admin.fetchTopicOffsets(topic); + let totalLag = 0; + const topicResponse = offsetsResponse.find(r => r.topic === topic); + if (topicResponse) { + topicResponse.partitions.forEach((partitionOffset) => { + const latestOffset = topicOffsets.find(to => to.partition === partitionOffset.partition); + if (latestOffset) { + const consumerOffset = parseInt(partitionOffset.offset); + const latestOffsetValue = parseInt(latestOffset.offset); + totalLag += Math.max(0, latestOffsetValue - consumerOffset); + } + }); + } + backlogMap.set(topic, totalLag); + } + } + finally { + await admin.disconnect(); + } + return backlogMap; + } +} +exports.KafkaAdapter = KafkaAdapter; diff --git a/client/adapters/SocketAdapter.d.ts b/client/adapters/SocketAdapter.d.ts new file mode 100644 index 0000000..2352675 --- /dev/null +++ b/client/adapters/SocketAdapter.d.ts @@ -0,0 +1,17 @@ +import { EventAdapter } from "../types/types"; +export declare class SocketAdapter implements EventAdapter { + private readonly options; + private socket; + private messageHandler?; + constructor(options: { + host: string; + port?: number; + protocol: string; + }); + connect(): Promise; + disconnect(): Promise; + publish(type: string, payload: T): Promise; + subscribe(type: string): Promise; + unsubscribe(type: string): Promise; + onMessage(handler: (type: string, payload: any) => void): void; +} diff --git a/client/adapters/SocketAdapter.js b/client/adapters/SocketAdapter.js new file mode 100644 index 0000000..99d18ec 
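For reference, the backlog figure getBacklog() reports above is the per-partition lag max(0, latestOffset - committedOffset) summed across partitions. A self-contained worked example with made-up offsets:

const committed = [{ partition: 0, offset: "120" }, { partition: 1, offset: "95" }];
const latest = [{ partition: 0, offset: "150" }, { partition: 1, offset: "95" }];

const totalLag = committed.reduce((sum, c) => {
  const match = latest.find((t) => t.partition === c.partition);
  return match ? sum + Math.max(0, parseInt(match.offset, 10) - parseInt(c.offset, 10)) : sum;
}, 0);

console.log(totalLag); // 30: partition 0 is 30 messages behind, partition 1 is caught up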
--- /dev/null +++ b/client/adapters/SocketAdapter.js @@ -0,0 +1,50 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.SocketAdapter = void 0; +const socket_io_client_1 = require("socket.io-client"); +class SocketAdapter { + options; + socket = null; + messageHandler; + constructor(options) { + this.options = options; + } + async connect() { + const { host, port, protocol } = this.options; + const socketPath = port ? `${protocol}://${host}:${port}` : `${protocol}://${host}`; + this.socket = (0, socket_io_client_1.io)(socketPath); + this.socket.on("event", ({ type, payload }) => { + if (this.messageHandler) { + this.messageHandler(type, payload); + } + }); + } + async disconnect() { + if (this.socket) { + this.socket.disconnect(); + this.socket = null; + } + } + async publish(type, payload) { + if (!this.socket) { + throw new Error("Socket not connected"); + } + this.socket.emit("publish", { type, payload }); + } + async subscribe(type) { + if (!this.socket) { + throw new Error("Socket not connected"); + } + this.socket.emit("subscribe", type); + } + async unsubscribe(type) { + if (!this.socket) { + throw new Error("Socket not connected"); + } + this.socket.emit("unsubscribe", type); + } + onMessage(handler) { + this.messageHandler = handler; + } +} +exports.SocketAdapter = SocketAdapter; diff --git a/client/eventManager.d.ts b/client/eventManager.d.ts new file mode 100644 index 0000000..6e61898 --- /dev/null +++ b/client/eventManager.d.ts @@ -0,0 +1,18 @@ +import { Callback, InitOptions } from "./types/types"; +export declare class EventManager { + private adapter; + private callbacks; + private metrics; + private backlogInterval; + init(options: InitOptions): Promise; + publish(...args: [...string[], T]): Promise; + subscribe(type: string, callback: Callback): Promise<() => void>; + disconnect(): Promise; + private handleIncomingMessage; + private executeCallbacks; + private validateEventType; + private startBacklogMonitoring; + private stopBacklogMonitoring; + private updateBacklogMetrics; + checkBacklog(): Promise; +} diff --git a/client/eventManager.js b/client/eventManager.js new file mode 100644 index 0000000..f9514ab --- /dev/null +++ b/client/eventManager.js @@ -0,0 +1,151 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.EventManager = void 0; +const metrics_1 = require("./metrics"); +const KafkaAdapter_1 = require("./adapters/KafkaAdapter"); +const SocketAdapter_1 = require("./adapters/SocketAdapter"); +class EventManager { + adapter = null; + callbacks = new Map(); + metrics = new metrics_1.EventMetrics(); + backlogInterval = null; + async init(options) { + if (this.adapter) { + await this.disconnect(); + } + switch (options.type) { + case "inMemory": + this.adapter = new SocketAdapter_1.SocketAdapter({ + host: options.host, + port: options.port, + protocol: options.protocol, + }); + break; + case "kafka": + this.adapter = new KafkaAdapter_1.KafkaAdapter({ + clientId: options.clientId, + brokers: options.brokers, + groupId: options.groupId, + }); + this.startBacklogMonitoring(); + break; + default: + throw new Error(`Unknown adapter type: ${options.type}`); + } + await this.adapter.connect(); + this.adapter.onMessage((type, payload) => { + this.handleIncomingMessage(type, payload); + }); + } + async publish(...args) { + if (args.length < 2) { + throw new Error("publish requires at least one event type and a payload"); + } + if (!this.adapter) { + throw new Error("Event system not initialized"); + } + 
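The lines that follow peel the payload off the end of the variadic argument list, so the accepted call shape is any number of topic strings followed by exactly one payload (topic names and payload here are illustrative):

// every leading string is a topic; the final argument is the payload,
// published once per topic and timed separately in the metrics
await event.publish("user.created", "email.welcome", "analytics.signup", {
  userId: "u-42",
  plan: "free",
});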
const payload = args[args.length - 1]; + const types = args.slice(0, -1); + for (const type of types) { + this.validateEventType(type); + const payloadSize = JSON.stringify(payload).length; + const endTimer = this.metrics.recordPublish(type, payloadSize); + try { + await this.adapter.publish(type, payload); + this.executeCallbacks(type, payload); + endTimer(); + } + catch (error) { + this.metrics.recordPublishError(type, "publish_error"); + endTimer(); + throw error; + } + } + } + async subscribe(type, callback) { + if (!this.callbacks.has(type)) { + this.callbacks.set(type, new Set()); + } + const callbackSet = this.callbacks.get(type); + callbackSet.add(callback); + this.metrics.updateSubscriptions(type, callbackSet.size); + if (this.adapter && callbackSet.size === 1) { + await this.adapter.subscribe(type); + } + return async () => { + callbackSet.delete(callback); + if (callbackSet.size === 0) { + this.callbacks.delete(type); + if (this.adapter) { + await this.adapter.unsubscribe(type); + } + } + this.metrics.updateSubscriptions(type, callbackSet.size); + }; + } + async disconnect() { + this.stopBacklogMonitoring(); + if (this.adapter) { + await this.adapter.disconnect(); + this.adapter = null; + } + this.callbacks.clear(); + } + handleIncomingMessage(type, payload) { + this.executeCallbacks(type, payload); + } + executeCallbacks(type, payload) { + const callbackSet = this.callbacks.get(type); + if (!callbackSet) + return; + callbackSet.forEach(callback => { + setTimeout(() => { + const endTimer = this.metrics.recordCallback(type); + try { + callback(payload); + } + catch (error) { + console.error(`Error in callback for ${type}:`, error); + } + endTimer(); + }, 0); + }); + } + validateEventType(type) { + if (type === "__proto__" || type === "constructor" || type === "prototype") { + throw new Error("Invalid event type"); + } + } + startBacklogMonitoring(intervalMs = 30000) { + if (!(this.adapter instanceof KafkaAdapter_1.KafkaAdapter)) + return; + this.updateBacklogMetrics(); + this.backlogInterval = setInterval(() => { + this.updateBacklogMetrics(); + }, intervalMs); + } + stopBacklogMonitoring() { + if (this.backlogInterval) { + clearInterval(this.backlogInterval); + this.backlogInterval = null; + } + } + async updateBacklogMetrics() { + if (!(this.adapter instanceof KafkaAdapter_1.KafkaAdapter)) + return; + try { + const backlog = await this.adapter.getBacklog(); + backlog.forEach((size, topic) => { + this.metrics.updateKafkaBacklog(topic, size); + console.log(`Backlog for topic ${topic}: ${size} messages`); + }); + } + catch (error) { + console.error("Error updating backlog metrics:", error); + } + } + async checkBacklog() { + await this.updateBacklogMetrics(); + } +} +exports.EventManager = EventManager; diff --git a/client/index.d.ts b/client/index.d.ts new file mode 100644 index 0000000..7d7db4b --- /dev/null +++ b/client/index.d.ts @@ -0,0 +1,18 @@ +import * as client from "prom-client"; +import { EventManager } from "./eventManager"; +import { EventMetrics } from "./metrics"; +import { KafkaAdapter } from "./adapters/KafkaAdapter"; +import { SocketAdapter } from "./adapters/SocketAdapter"; +export declare const event: { + init: (options: any) => Promise; + publish: (...args: [...string[], T]) => Promise; + subscribe: (type: string, callback: any) => Promise<() => void>; + disconnect: () => Promise; + checkBacklog: () => Promise; + startBacklogMonitoring: () => void; + stopBacklogMonitoring: () => void; + restartKafkaConsumer: () => Promise; +}; +export { client }; +export { 
EventManager, EventMetrics, SocketAdapter, KafkaAdapter }; +export * from "./types/types"; diff --git a/client/index.js b/client/index.js new file mode 100644 index 0000000..6938413 --- /dev/null +++ b/client/index.js @@ -0,0 +1,67 @@ +"use strict"; +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || (function () { + var ownKeys = function(o) { + ownKeys = Object.getOwnPropertyNames || function (o) { + var ar = []; + for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; + return function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); + __setModuleDefault(result, mod); + return result; + }; +})(); +var __exportStar = (this && this.__exportStar) || function(m, exports) { + for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.KafkaAdapter = exports.SocketAdapter = exports.EventMetrics = exports.EventManager = exports.client = exports.event = void 0; +const client = __importStar(require("prom-client")); +exports.client = client; +const eventManager_1 = require("./eventManager"); +Object.defineProperty(exports, "EventManager", { enumerable: true, get: function () { return eventManager_1.EventManager; } }); +const metrics_1 = require("./metrics"); +Object.defineProperty(exports, "EventMetrics", { enumerable: true, get: function () { return metrics_1.EventMetrics; } }); +const KafkaAdapter_1 = require("./adapters/KafkaAdapter"); +Object.defineProperty(exports, "KafkaAdapter", { enumerable: true, get: function () { return KafkaAdapter_1.KafkaAdapter; } }); +const SocketAdapter_1 = require("./adapters/SocketAdapter"); +Object.defineProperty(exports, "SocketAdapter", { enumerable: true, get: function () { return SocketAdapter_1.SocketAdapter; } }); +const manager = new eventManager_1.EventManager(); +exports.event = { + init: (options) => manager.init(options), + publish: (...args) => manager.publish(...args), + subscribe: (type, callback) => manager.subscribe(type, callback), + disconnect: () => manager.disconnect(), + checkBacklog: () => manager.checkBacklog(), + startBacklogMonitoring: () => { + console.log("Backlog monitoring starts automatically with Kafka adapter"); + }, + stopBacklogMonitoring: () => { + console.log("Backlog monitoring stops automatically on disconnect"); + }, + restartKafkaConsumer: async () => { + console.log("Consumer restart is handled automatically"); + }, +}; +__exportStar(require("./types/types"), exports); diff --git a/client/metrics.d.ts b/client/metrics.d.ts new file mode 100644 index 0000000..f751689 --- /dev/null +++ 
b/client/metrics.d.ts @@ -0,0 +1,15 @@ +export declare class EventMetrics { + private readonly publishCounter; + private readonly subscriptionGauge; + private readonly publishDuration; + private readonly payloadSize; + private readonly publishErrors; + private readonly callbackDuration; + private readonly throughput; + private readonly kafkaBacklog; + recordPublish(type: string, payloadSizeBytes: number): () => void; + recordPublishError(type: string, errorType: string): void; + recordCallback(type: string): () => void; + updateSubscriptions(type: string, count: number): void; + updateKafkaBacklog(topic: string, size: number): void; +} diff --git a/client/metrics.js b/client/metrics.js new file mode 100644 index 0000000..41ecd1b --- /dev/null +++ b/client/metrics.js @@ -0,0 +1,101 @@ +"use strict"; +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || (function () { + var ownKeys = function(o) { + ownKeys = Object.getOwnPropertyNames || function (o) { + var ar = []; + for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; + return function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); + __setModuleDefault(result, mod); + return result; + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.EventMetrics = void 0; +const client = __importStar(require("prom-client")); +class EventMetrics { + publishCounter = new client.Counter({ + name: "events_published_total", + help: "Total number of events published", + labelNames: ["event_type"], + }); + subscriptionGauge = new client.Gauge({ + name: "active_event_subscriptions", + help: "Number of active event subscriptions", + labelNames: ["event_type"], + }); + publishDuration = new client.Histogram({ + name: "event_publish_duration_seconds", + help: "Time taken to publish events", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + }); + payloadSize = new client.Histogram({ + name: "event_payload_size_bytes", + help: "Size of event payloads in bytes", + labelNames: ["event_type"], + buckets: [10, 100, 1000, 10000, 100000, 1000000], + }); + publishErrors = new client.Counter({ + name: "event_publish_errors_total", + help: "Total number of event publish errors", + labelNames: ["event_type", "error_type"], + }); + callbackDuration = new client.Histogram({ + name: "event_callback_duration_seconds", + help: "Time taken to process event callbacks", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + }); + throughput = new client.Counter({ + name: "event_callbacks_processed_total", + help: "Total number of event callbacks processed successfully", + labelNames: 
["event_type"], + }); + kafkaBacklog = new client.Gauge({ + name: "kafka_backlog_events_total", + help: "Total number of events waiting to be processed", + labelNames: ["topic"], + }); + recordPublish(type, payloadSizeBytes) { + this.publishCounter.labels(type).inc(); + this.payloadSize.labels(type).observe(payloadSizeBytes); + return this.publishDuration.labels(type).startTimer(); + } + recordPublishError(type, errorType) { + this.publishErrors.labels(type, errorType).inc(); + } + recordCallback(type) { + this.throughput.labels(type).inc(); + return this.callbackDuration.labels(type).startTimer(); + } + updateSubscriptions(type, count) { + this.subscriptionGauge.labels(type).set(count); + } + updateKafkaBacklog(topic, size) { + this.kafkaBacklog.labels(topic).set(size); + } +} +exports.EventMetrics = EventMetrics; diff --git a/client/types/types.d.ts b/client/types/types.d.ts new file mode 100644 index 0000000..7e52b81 --- /dev/null +++ b/client/types/types.d.ts @@ -0,0 +1,25 @@ +export type Callback = (payload: T) => void; +export interface EventAdapter { + connect(): Promise; + disconnect(): Promise; + publish(type: string, payload: T): Promise; + subscribe(type: string): Promise; + unsubscribe(type: string): Promise; + onMessage(handler: (type: string, payload: any) => void): void; +} +export interface BaseInitOptions { + type: "inMemory" | "kafka"; +} +export interface InMemoryOptions extends BaseInitOptions { + type: "inMemory"; + host: string; + port?: number; + protocol: string; +} +export interface KafkaOptions extends BaseInitOptions { + type: "kafka"; + clientId: string; + brokers: string[]; + groupId: string; +} +export type InitOptions = InMemoryOptions | KafkaOptions; diff --git a/client/types/types.js b/client/types/types.js new file mode 100644 index 0000000..c8ad2e5 --- /dev/null +++ b/client/types/types.js @@ -0,0 +1,2 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); diff --git a/package.json b/package.json index 1efa605..5083d3f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.15", + "version": "1.1.16", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ @@ -8,7 +8,7 @@ ], "scripts": { "start": "node sample/publisher/index.js && node sample/subscriber/index.js && npx nuc-node-event-test/server", - "build": "npx tsc client/Event.ts --outDir client --declaration", + "build": "npx tsc --project tsconfig.json", "kafka:up": "docker-compose up -d", "kafka:down": "docker-compose down", "kafka:test": "node examples/kafka-example.js" diff --git a/tsconfig.json b/tsconfig.json index 64df4b9..9307acd 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -2,11 +2,23 @@ "compilerOptions": { "target": "esnext", "module": "nodenext", + "moduleResolution": "NodeNext", "esModuleInterop": true, "forceConsistentCasingInFileNames": true, "strict": true, "skipLibCheck": true, "noImplicitAny": false, - "isolatedModules": true - } + "isolatedModules": true, + "declaration": true, + "outDir": "client", + "rootDir": "client" + }, + "include": [ + "client/**/*" + ], + "exclude": [ + "node_modules", + "**/*.test.ts", + "**/*.spec.ts" + ] } From 23e466adc402a01153d69826a49a96710c8e2f02 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Thu, 2 Oct 2025 13:05:56 +0300 Subject: [PATCH 13/35] Enforce object payloads in publish/subscribe methods Updated type signatures for publish and subscribe methods in EventManager and event exports to require payloads to extend object, 
improving type safety. Also updated package version to 1.1.20. --- client/eventManager.d.ts | 4 ++-- client/eventManager.ts | 6 +++--- client/index.d.ts | 5 +++-- client/index.ts | 5 +++-- package.json | 2 +- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/client/eventManager.d.ts b/client/eventManager.d.ts index 6e61898..b881ba0 100644 --- a/client/eventManager.d.ts +++ b/client/eventManager.d.ts @@ -5,8 +5,8 @@ export declare class EventManager { private metrics; private backlogInterval; init(options: InitOptions): Promise<void>; - publish<T>(...args: [...string[], T]): Promise<void>; - subscribe<T>(type: string, callback: Callback<T>): Promise<() => void>; + publish<T extends object>(...args: [...string[], T]): Promise<void>; + subscribe<T extends object>(type: string, callback: Callback<T>): Promise<() => void>; disconnect(): Promise<void>; private handleIncomingMessage; private executeCallbacks; diff --git a/client/eventManager.ts b/client/eventManager.ts index 282fb54..93d4ce3 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -44,8 +44,8 @@ export class EventManager { }); } - async publish<T>(...args: [...string[], T]): Promise<void> { - if (args.length < 2) { + async publish<T extends object>(...args: [...string[], T]): Promise<void> { + if (args.length < 2) { throw new Error("publish requires at least one event type and a payload"); } @@ -76,7 +76,7 @@ export class EventManager { } } - async subscribe<T>(type: string, callback: Callback<T>): Promise<() => void> { + async subscribe<T extends object>(type: string, callback: Callback<T>): Promise<() => void> { if (!this.callbacks.has(type)) { this.callbacks.set(type, new Set()); } diff --git a/client/index.d.ts b/client/index.d.ts index 7d7db4b..6029ddd 100644 --- a/client/index.d.ts +++ b/client/index.d.ts @@ -1,12 +1,13 @@ import * as client from "prom-client"; +import { Callback } from "./types/types"; import { EventManager } from "./eventManager"; import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; export declare const event: { init: (options: any) => Promise<void>; - publish: <T>(...args: [...string[], T]) => Promise<void>; - subscribe: (type: string, callback: any) => Promise<() => void>; + publish: <T extends object>(...args: [...string[], T]) => Promise<void>; + subscribe: <T extends object>(type: string, callback: Callback<T>) => Promise<() => void>; disconnect: () => Promise<void>; checkBacklog: () => Promise<void>; startBacklogMonitoring: () => void; diff --git a/client/index.ts b/client/index.ts index 377abda..692a2aa 100644 --- a/client/index.ts +++ b/client/index.ts @@ -1,5 +1,6 @@ import * as client from "prom-client"; +import { Callback } from "./types/types"; import { EventManager } from "./eventManager"; import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; @@ -9,8 +10,8 @@ import { SocketAdapter } from "./adapters/SocketAdapter"; const manager = new EventManager(); export const event = { init: (options: any) => manager.init(options), - publish: <T>(...args: [...string[], T]) => manager.publish<T>(...args), - subscribe: (type: string, callback: any) => manager.subscribe(type, callback), + publish: <T extends object>(...args: [...string[], T]) => manager.publish<T>(...args), + subscribe: <T extends object>(type: string, callback: Callback<T>) => manager.subscribe(type, callback), disconnect: () => manager.disconnect(), checkBacklog: () => manager.checkBacklog(), diff --git a/package.json b/package.json index 5083d3f..46b1e15 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.16", + "version": "1.1.20", "description": "Event-driven Message Broker", "main": "index.js", "keywords":
[ From 91da7b3580cc8a983dc2571daffcf21e3ff7022f Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Thu, 2 Oct 2025 18:56:11 +0300 Subject: [PATCH 14/35] Refactor event type handling and typing for publish Changed event type handling in publish to merge multiple types into a single string separated by underscores, ensuring consistent event naming. Updated all relevant type signatures to use object instead of any for payloads and callbacks, improving type safety. Also updated the EventAdapter interface and related adapters, and incremented the package version. --- client/adapters/KafkaAdapter.d.ts | 4 +-- client/adapters/KafkaAdapter.ts | 6 ++-- client/adapters/SocketAdapter.d.ts | 4 +-- client/adapters/SocketAdapter.ts | 8 ++--- client/eventManager.js | 33 ++++++++++----------- client/eventManager.ts | 47 +++++++++++++++--------------- client/index.d.ts | 4 +-- client/index.ts | 5 ++-- client/types/types.d.ts | 6 ++-- client/types/types.ts | 6 ++-- package.json | 2 +- server/server.ts | 2 +- 12 files changed, 63 insertions(+), 64 deletions(-) diff --git a/client/adapters/KafkaAdapter.d.ts b/client/adapters/KafkaAdapter.d.ts index fe299f3..fb575e9 100644 --- a/client/adapters/KafkaAdapter.d.ts +++ b/client/adapters/KafkaAdapter.d.ts @@ -14,10 +14,10 @@ export declare class KafkaAdapter implements EventAdapter { }); connect(): Promise; disconnect(): Promise; - publish(type: string, payload: T): Promise; + publish(type: string, payload: T): Promise; subscribe(type: string): Promise; unsubscribe(type: string): Promise; - onMessage(handler: (type: string, payload: any) => void): void; + onMessage(handler: (type: string, payload: object) => void): void; private restartConsumer; getBacklog(): Promise>; } diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts index 1222b24..8d93744 100644 --- a/client/adapters/KafkaAdapter.ts +++ b/client/adapters/KafkaAdapter.ts @@ -6,7 +6,7 @@ export class KafkaAdapter implements EventAdapter { private kafka: Kafka; private consumer: Consumer | null = null; private producer: Producer | null = null; - private messageHandler?: (type: string, payload: any) => void; + private messageHandler?: (type: string, payload: object) => void; private subscribedTopics = new Set(); private isRunning = false; @@ -39,7 +39,7 @@ export class KafkaAdapter implements EventAdapter { this.subscribedTopics.clear(); } - async publish(type: string, payload: T): Promise { + async publish(type: string, payload: T): Promise { const producer = this.kafka.producer(); await producer.connect(); @@ -72,7 +72,7 @@ export class KafkaAdapter implements EventAdapter { } } - onMessage(handler: (type: string, payload: any) => void): void { + onMessage(handler: (type: string, payload: object) => void): void { this.messageHandler = handler; } diff --git a/client/adapters/SocketAdapter.d.ts b/client/adapters/SocketAdapter.d.ts index 2352675..97d3317 100644 --- a/client/adapters/SocketAdapter.d.ts +++ b/client/adapters/SocketAdapter.d.ts @@ -10,8 +10,8 @@ export declare class SocketAdapter implements EventAdapter { }); connect(): Promise; disconnect(): Promise; - publish(type: string, payload: T): Promise; + publish(type: string, payload: object): Promise; subscribe(type: string): Promise; unsubscribe(type: string): Promise; - onMessage(handler: (type: string, payload: any) => void): void; + onMessage(handler: (type: string, payload: object) => void): void; } diff --git a/client/adapters/SocketAdapter.ts b/client/adapters/SocketAdapter.ts index 02ddff3..0f940ae 100644 --- 
a/client/adapters/SocketAdapter.ts +++ b/client/adapters/SocketAdapter.ts @@ -4,7 +4,7 @@ import { EventAdapter } from "../types/types"; export class SocketAdapter implements EventAdapter { private socket: Socket | null = null; - private messageHandler?: (type: string, payload: any) => void; + private messageHandler?: (type: string, payload: object) => void; constructor(private readonly options: { host: string; @@ -18,7 +18,7 @@ export class SocketAdapter implements EventAdapter { this.socket = io(socketPath); - this.socket.on("event", ({ type, payload }: { type: string; payload: any }) => { + this.socket.on("event", ({ type, payload }: { type: string; payload: object }) => { if (this.messageHandler) { this.messageHandler(type, payload); } @@ -32,7 +32,7 @@ export class SocketAdapter implements EventAdapter { } } - async publish(type: string, payload: T): Promise { + async publish(type: string, payload: object): Promise { if (!this.socket) { throw new Error("Socket not connected"); } @@ -53,7 +53,7 @@ export class SocketAdapter implements EventAdapter { this.socket.emit("unsubscribe", type); } - onMessage(handler: (type: string, payload: any) => void): void { + onMessage(handler: (type: string, payload: object) => void): void { this.messageHandler = handler; } } \ No newline at end of file diff --git a/client/eventManager.js b/client/eventManager.js index f9514ab..5cdda22 100644 --- a/client/eventManager.js +++ b/client/eventManager.js @@ -30,7 +30,7 @@ class EventManager { this.startBacklogMonitoring(); break; default: - throw new Error(`Unknown adapter type: ${options.type}`); + throw new Error(`Unknown adapter type`); } await this.adapter.connect(); this.adapter.onMessage((type, payload) => { @@ -38,28 +38,27 @@ class EventManager { }); } async publish(...args) { - if (args.length < 2) { + if (args.length < 1) { throw new Error("publish requires at least one event type and a payload"); } if (!this.adapter) { throw new Error("Event system not initialized"); } const payload = args[args.length - 1]; - const types = args.slice(0, -1); - for (const type of types) { - this.validateEventType(type); - const payloadSize = JSON.stringify(payload).length; - const endTimer = this.metrics.recordPublish(type, payloadSize); - try { - await this.adapter.publish(type, payload); - this.executeCallbacks(type, payload); - endTimer(); - } - catch (error) { - this.metrics.recordPublishError(type, "publish_error"); - endTimer(); - throw error; - } + const type = args.slice(0, -1); + const mergedType = type.join('_'); + this.validateEventType(mergedType); + const payloadSize = JSON.stringify(payload).length; + const endTimer = this.metrics.recordPublish(mergedType, payloadSize); + try { + await this.adapter.publish(mergedType, payload); + this.executeCallbacks(mergedType, payload); + endTimer(); + } + catch (error) { + this.metrics.recordPublishError(mergedType, "publish_error"); + endTimer(); + throw error; } } async subscribe(type, callback) { diff --git a/client/eventManager.ts b/client/eventManager.ts index 93d4ce3..d5ab678 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -34,7 +34,7 @@ export class EventManager { break; default: - throw new Error(`Unknown adapter type: ${(options as any).type}`); + throw new Error(`Unknown adapter type`); } await this.adapter.connect(); @@ -45,34 +45,33 @@ export class EventManager { } async publish(...args: [...string[], T]): Promise { - if (args.length < 2) { + if (args.length < 1) { throw new Error("publish requires at least one event type and a 
payload"); } if (!this.adapter) { throw new Error("Event system not initialized"); } + + const payload = args[args.length - 1] as T; + const type = args.slice(0, -1) as string[]; + + const mergedType = type.join('_'); + this.validateEventType(mergedType); + + const payloadSize = JSON.stringify(payload).length; + const endTimer = this.metrics.recordPublish(mergedType, payloadSize); - const payload = args[args.length - 1]; - const types = args.slice(0, -1) as string[]; - - for (const type of types) { - this.validateEventType(type); - - const payloadSize = JSON.stringify(payload).length; - const endTimer = this.metrics.recordPublish(type, payloadSize); - - try { - await this.adapter.publish(type, payload); + try { + await this.adapter.publish(mergedType, payload); - this.executeCallbacks(type, payload); + this.executeCallbacks(mergedType, payload); - endTimer(); - } catch (error) { - this.metrics.recordPublishError(type, "publish_error"); - endTimer(); - throw error; - } + endTimer(); + } catch (error) { + this.metrics.recordPublishError(mergedType, "publish_error"); + endTimer(); + throw error; } } @@ -82,7 +81,7 @@ export class EventManager { } const callbackSet = this.callbacks.get(type)!; - callbackSet.add(callback); + callbackSet.add(callback as Callback); this.metrics.updateSubscriptions(type, callbackSet.size); @@ -91,7 +90,7 @@ export class EventManager { } return async () => { - callbackSet.delete(callback); + callbackSet.delete(callback as Callback); if (callbackSet.size === 0) { this.callbacks.delete(type); @@ -115,11 +114,11 @@ export class EventManager { this.callbacks.clear(); } - private handleIncomingMessage(type: string, payload: any): void { + private handleIncomingMessage(type: string, payload: object): void { this.executeCallbacks(type, payload); } - private executeCallbacks(type: string, payload: any): void { + private executeCallbacks(type: string, payload: object): void { const callbackSet = this.callbacks.get(type); if (!callbackSet) return; diff --git a/client/index.d.ts b/client/index.d.ts index 6029ddd..d22f9dc 100644 --- a/client/index.d.ts +++ b/client/index.d.ts @@ -1,11 +1,11 @@ import * as client from "prom-client"; -import { Callback } from "./types/types"; +import { Callback, InitOptions } from "./types/types"; import { EventManager } from "./eventManager"; import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; export declare const event: { - init: (options: any) => Promise; + init: (options: InitOptions) => Promise; publish: (...args: [...string[], T]) => Promise; subscribe: (type: string, callback: Callback) => Promise<() => void>; disconnect: () => Promise; diff --git a/client/index.ts b/client/index.ts index 692a2aa..757879f 100644 --- a/client/index.ts +++ b/client/index.ts @@ -1,6 +1,7 @@ import * as client from "prom-client"; -import { Callback } from "./types/types"; +import { Callback, InitOptions } from "./types/types"; + import { EventManager } from "./eventManager"; import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; @@ -9,7 +10,7 @@ import { SocketAdapter } from "./adapters/SocketAdapter"; const manager = new EventManager(); export const event = { - init: (options: any) => manager.init(options), + init: (options: InitOptions) => manager.init(options), publish: (...args: [...string[], T]) => manager.publish(...args), subscribe: (type: string, callback: Callback) => manager.subscribe(type, callback), 
disconnect: () => manager.disconnect(), diff --git a/client/types/types.d.ts b/client/types/types.d.ts index 7e52b81..fef2930 100644 --- a/client/types/types.d.ts +++ b/client/types/types.d.ts @@ -1,11 +1,11 @@ -export type Callback = (payload: T) => void; +export type Callback = (payload: T) => void; export interface EventAdapter { connect(): Promise; disconnect(): Promise; - publish(type: string, payload: T): Promise; + publish(type: string, payload: object): Promise; subscribe(type: string): Promise; unsubscribe(type: string): Promise; - onMessage(handler: (type: string, payload: any) => void): void; + onMessage(handler: (type: string, payload: object) => void): void; } export interface BaseInitOptions { type: "inMemory" | "kafka"; diff --git a/client/types/types.ts b/client/types/types.ts index 0aed371..4076904 100644 --- a/client/types/types.ts +++ b/client/types/types.ts @@ -1,12 +1,12 @@ -export type Callback = (payload: T) => void; +export type Callback = (payload: T) => void; export interface EventAdapter { connect(): Promise; disconnect(): Promise; - publish(type: string, payload: T): Promise; + publish(type: string, payload: object): Promise; subscribe(type: string): Promise; unsubscribe(type: string): Promise; - onMessage(handler: (type: string, payload: any) => void): void; + onMessage(handler: (type: string, payload: object) => void): void; } export interface BaseInitOptions { diff --git a/package.json b/package.json index 46b1e15..95cf4f8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.20", + "version": "1.1.22", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ diff --git a/server/server.ts b/server/server.ts index 9d5e3ad..6fe7a9f 100644 --- a/server/server.ts +++ b/server/server.ts @@ -29,7 +29,7 @@ io.on('connection', (socket: Socket) => { } }); - socket.on('publish', ({ type, payload }: { type: string; payload: any }) => { + socket.on('publish', ({ type, payload }: { type: string; payload: object }) => { console.log(`Publish: ${type}`, payload); if (subscriptions[type]) { subscriptions[type].forEach((sid) => { From 5bb49e34a140589e28082efce609d962454cebb4 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Thu, 2 Oct 2025 18:56:59 +0300 Subject: [PATCH 15/35] Add placeholder test script to package.json Introduced a 'test' script in package.json that outputs a placeholder message. This prepares the project for future test integration. --- package.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/package.json b/package.json index 95cf4f8..b5d1ef3 100644 --- a/package.json +++ b/package.json @@ -11,7 +11,8 @@ "build": "npx tsc --project tsconfig.json", "kafka:up": "docker-compose up -d", "kafka:down": "docker-compose down", - "kafka:test": "node examples/kafka-example.js" + "kafka:test": "node examples/kafka-example.js", + "test": "echo 'No tests specified'" }, "dependencies": { "chalk": "^4.1.2", From 23405082116b379e74960ea1eded038c252355ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Mon, 6 Oct 2025 17:07:25 +0300 Subject: [PATCH 16/35] Add Pushgateway metrics integration Introduces Pushgateway support to EventMetrics, allowing metrics to be periodically pushed to a Prometheus Pushgateway. Adds related methods to EventManager and exposes PushgatewayConfig type in the public API. 
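A usage sketch for reviewers of how a consumer would drive the new Pushgateway API. The broker list, job name, and interval below are illustrative assumptions, not values from this series, and the import assumes the published entry point re-exports client/index; fields omitted from the config fall back to the defaults in client/metrics.ts (http://localhost:9091, "node_events", "default_instance", 15000 ms).

import { event } from "node-event-test-package";

async function main() {
  // Illustrative connection values; only the Pushgateway calls matter here.
  await event.init({
    type: "kafka",
    clientId: "example-client",
    brokers: ["localhost:9092"],
    groupId: "example-group",
  });

  // Push all registered prom-client metrics every 30 s.
  event.startPushgateway({
    url: "http://pushgateway:9091", // hypothetical gateway address
    jobName: "event_client",        // hypothetical job name
    instance: "worker-1",           // hypothetical instance label
    interval: 30_000,
  });

  // One-off push (e.g. right before shutdown), then stop the interval timer.
  await event.pushMetricsToGateway();
  event.stopPushgateway();
}

main().catch(console.error);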
--- client/eventManager.ts | 49 +++++++++++++++++++++------- client/index.ts | 19 ++++++++--- client/metrics.ts | 74 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 124 insertions(+), 18 deletions(-) diff --git a/client/eventManager.ts b/client/eventManager.ts index d5ab678..8093f18 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -1,6 +1,6 @@ import { Callback, EventAdapter, InitOptions } from "./types/types"; +import { EventMetrics, PushgatewayConfig } from "./metrics"; -import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; @@ -38,13 +38,15 @@ export class EventManager { } await this.adapter.connect(); - + this.adapter.onMessage((type, payload) => { this.handleIncomingMessage(type, payload); }); } - async publish(...args: [...string[], T]): Promise { + async publish( + ...args: [...string[], T] + ): Promise { if (args.length < 1) { throw new Error("publish requires at least one event type and a payload"); } @@ -52,13 +54,13 @@ export class EventManager { if (!this.adapter) { throw new Error("Event system not initialized"); } - + const payload = args[args.length - 1] as T; const type = args.slice(0, -1) as string[]; - - const mergedType = type.join('_'); + + const mergedType = type.join("_"); this.validateEventType(mergedType); - + const payloadSize = JSON.stringify(payload).length; const endTimer = this.metrics.recordPublish(mergedType, payloadSize); @@ -75,7 +77,10 @@ export class EventManager { } } - async subscribe(type: string, callback: Callback): Promise<() => void> { + async subscribe( + type: string, + callback: Callback + ): Promise<() => void> { if (!this.callbacks.has(type)) { this.callbacks.set(type, new Set()); } @@ -91,7 +96,7 @@ export class EventManager { return async () => { callbackSet.delete(callback as Callback); - + if (callbackSet.size === 0) { this.callbacks.delete(type); if (this.adapter) { @@ -122,7 +127,7 @@ export class EventManager { const callbackSet = this.callbacks.get(type); if (!callbackSet) return; - callbackSet.forEach(callback => { + callbackSet.forEach((callback) => { setTimeout(() => { const endTimer = this.metrics.recordCallback(type); try { @@ -136,7 +141,11 @@ export class EventManager { } private validateEventType(type: string): void { - if (type === "__proto__" || type === "constructor" || type === "prototype") { + if ( + type === "__proto__" || + type === "constructor" || + type === "prototype" + ) { throw new Error("Invalid event type"); } } @@ -175,4 +184,20 @@ export class EventManager { async checkBacklog(): Promise { await this.updateBacklogMetrics(); } -} \ No newline at end of file + + startPushgateway(config?: PushgatewayConfig): void { + this.metrics.startPushgateway(config); + } + + stopPushgateway(): void { + this.metrics.stopPushgateway(); + } + + async pushMetricsToGateway(): Promise { + await this.metrics.pushMetricsToGateway(); + } + + getPushgatewayConfig(): PushgatewayConfig | undefined { + return this.metrics.getPushgatewayConfig(); + } +} diff --git a/client/index.ts b/client/index.ts index 757879f..44876a6 100644 --- a/client/index.ts +++ b/client/index.ts @@ -1,9 +1,9 @@ import * as client from "prom-client"; import { Callback, InitOptions } from "./types/types"; +import { EventMetrics, PushgatewayConfig } from "./metrics"; import { EventManager } from "./eventManager"; -import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } 
from "./adapters/SocketAdapter"; @@ -11,11 +11,13 @@ const manager = new EventManager(); export const event = { init: (options: InitOptions) => manager.init(options), - publish: (...args: [...string[], T]) => manager.publish(...args), - subscribe: (type: string, callback: Callback) => manager.subscribe(type, callback), + publish: (...args: [...string[], T]) => + manager.publish(...args), + subscribe: (type: string, callback: Callback) => + manager.subscribe(type, callback), disconnect: () => manager.disconnect(), checkBacklog: () => manager.checkBacklog(), - + startBacklogMonitoring: () => { console.log("Backlog monitoring starts automatically with Kafka adapter"); }, @@ -25,9 +27,16 @@ export const event = { restartKafkaConsumer: async () => { console.log("Consumer restart is handled automatically"); }, + + startPushgateway: (config?: PushgatewayConfig) => + manager.startPushgateway(config), + stopPushgateway: () => manager.stopPushgateway(), + pushMetricsToGateway: () => manager.pushMetricsToGateway(), + getPushgatewayConfig: () => manager.getPushgatewayConfig(), }; export { client }; export { EventManager, EventMetrics, SocketAdapter, KafkaAdapter }; -export * from "./types/types"; \ No newline at end of file +export type { PushgatewayConfig }; +export * from "./types/types"; diff --git a/client/metrics.ts b/client/metrics.ts index 96d5c0b..f28211e 100644 --- a/client/metrics.ts +++ b/client/metrics.ts @@ -1,6 +1,16 @@ import * as client from "prom-client"; +export interface PushgatewayConfig { + url?: string; + jobName?: string; + instance?: string; + interval?: number; +} + export class EventMetrics { + private pushgatewayInterval?: NodeJS.Timeout; + private pushgatewayConfig?: PushgatewayConfig; + private readonly publishCounter = new client.Counter({ name: "events_published_total", help: "Total number of events published", @@ -74,4 +84,66 @@ export class EventMetrics { updateKafkaBacklog(topic: string, size: number): void { this.kafkaBacklog.labels(topic).set(size); } -} \ No newline at end of file + + startPushgateway(config: PushgatewayConfig = {}): void { + this.pushgatewayConfig = { + url: config.url || "http://localhost:9091", + jobName: config.jobName || "node_events", + instance: config.instance || "default_instance", + interval: config.interval || 15000, + }; + + this.stopPushgateway(); + + this.pushgatewayInterval = setInterval(() => { + this.pushMetricsToGateway(); + }, this.pushgatewayConfig.interval); + + console.log( + `Started pushing metrics to Pushgateway every ${this.pushgatewayConfig.interval}ms` + ); + } + + stopPushgateway(): void { + if (this.pushgatewayInterval) { + clearInterval(this.pushgatewayInterval); + this.pushgatewayInterval = undefined; + console.log("Stopped pushing metrics to Pushgateway"); + } + } + + async pushMetricsToGateway(): Promise { + if (!this.pushgatewayConfig) { + throw new Error( + "Pushgateway not configured. Call startPushgateway() first." 
+ ); + } + + try { + const body = await client.register.metrics(); + let url = `${this.pushgatewayConfig.url}/metrics/job/${this.pushgatewayConfig.jobName}`; + + if (this.pushgatewayConfig.instance) { + url += `/instance/${this.pushgatewayConfig.instance}`; + } + + const response = await fetch(url, { + method: "POST", + headers: { "Content-Type": "text/plain" }, + body, + }); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + console.log("Metrics pushed to Pushgateway successfully"); + } catch (err) { + console.error("Failed to push metrics to Pushgateway:", err); + } + } + + getPushgatewayConfig(): PushgatewayConfig | undefined { + return this.pushgatewayConfig; + } +} From 0c48d94f6288208cfff8b504c689f7228c3f8e0d Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Tue, 7 Oct 2025 11:13:45 +0300 Subject: [PATCH 17/35] Refactor KafkaAdapter to use static topic list KafkaAdapter now receives a static list of topics at construction and no longer manages dynamic topic subscriptions. EventManager provides the topic list and handles callback registration/removal in memory. This simplifies consumer management and improves separation of concerns. --- client/adapters/KafkaAdapter.d.ts | 5 +- client/adapters/KafkaAdapter.js | 95 +++++++++--------------- client/adapters/KafkaAdapter.ts | 116 +++++++++++------------------- client/eventManager.js | 29 ++++++-- client/eventManager.ts | 53 ++++++++++---- package.json | 2 +- 6 files changed, 146 insertions(+), 154 deletions(-) diff --git a/client/adapters/KafkaAdapter.d.ts b/client/adapters/KafkaAdapter.d.ts index fb575e9..076922d 100644 --- a/client/adapters/KafkaAdapter.d.ts +++ b/client/adapters/KafkaAdapter.d.ts @@ -5,12 +5,12 @@ export declare class KafkaAdapter implements EventAdapter { private consumer; private producer; private messageHandler?; - private subscribedTopics; - private isRunning; + private readonly topics; constructor(options: { clientId: string; brokers: string[]; groupId: string; + topics: string[]; }); connect(): Promise; disconnect(): Promise; @@ -18,6 +18,5 @@ export declare class KafkaAdapter implements EventAdapter { subscribe(type: string): Promise; unsubscribe(type: string): Promise; onMessage(handler: (type: string, payload: object) => void): void; - private restartConsumer; getBacklog(): Promise>; } diff --git a/client/adapters/KafkaAdapter.js b/client/adapters/KafkaAdapter.js index 597467e..99ee383 100644 --- a/client/adapters/KafkaAdapter.js +++ b/client/adapters/KafkaAdapter.js @@ -8,76 +8,22 @@ class KafkaAdapter { consumer = null; producer = null; messageHandler; - subscribedTopics = new Set(); - isRunning = false; + topics; constructor(options) { this.options = options; this.kafka = new kafkajs_1.Kafka({ clientId: options.clientId, brokers: options.brokers, }); + this.topics = options.topics; } async connect() { this.producer = this.kafka.producer(); await this.producer.connect(); - } - async disconnect() { - if (this.consumer && this.isRunning) { - await this.consumer.stop(); - await this.consumer.disconnect(); - this.consumer = null; - this.isRunning = false; - } - this.subscribedTopics.clear(); - } - async publish(type, payload) { - const producer = this.kafka.producer(); - await producer.connect(); - try { - await producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - }); - } - finally { - await producer.disconnect(); - } - } - async subscribe(type) { - if (!this.subscribedTopics.has(type)) { - this.subscribedTopics.add(type); - 
await this.restartConsumer(); - } - } - async unsubscribe(type) { - this.subscribedTopics.delete(type); - if (this.subscribedTopics.size > 0) { - await this.restartConsumer(); - } - else if (this.consumer) { - await this.consumer.stop(); - await this.consumer.disconnect(); - this.consumer = null; - this.isRunning = false; - } - } - onMessage(handler) { - this.messageHandler = handler; - } - async restartConsumer() { - if (this.subscribedTopics.size === 0) - return; - if (this.consumer && this.isRunning) { - console.log("Stopping existing Kafka consumer..."); - await this.consumer.stop(); - await this.consumer.disconnect(); - this.isRunning = false; - } - console.log(`Starting Kafka consumer with topics: ${Array.from(this.subscribedTopics).join(", ")}`); this.consumer = this.kafka.consumer({ groupId: this.options.groupId }); await this.consumer.connect(); await this.consumer.subscribe({ - topics: Array.from(this.subscribedTopics), + topics: this.topics, fromBeginning: false, }); await this.consumer.run({ @@ -94,16 +40,43 @@ class KafkaAdapter { } }, }); - this.isRunning = true; + console.log(`Kafka consumer started with topics: ${this.topics.join(", ")}`); + } + async disconnect() { + if (this.consumer) { + await this.consumer.stop(); + await this.consumer.disconnect(); + this.consumer = null; + } + if (this.producer) { + await this.producer.disconnect(); + this.producer = null; + } + } + async publish(type, payload) { + if (!this.producer) { + throw new Error("Producer not connected"); + } + await this.producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); + } + async subscribe(type) { + // EventManager handles callback registration in memory + } + async unsubscribe(type) { + // EventManager handles callback removal in memory + } + onMessage(handler) { + this.messageHandler = handler; } async getBacklog() { const backlogMap = new Map(); - if (this.subscribedTopics.size === 0) - return backlogMap; const admin = this.kafka.admin(); await admin.connect(); try { - for (const topic of this.subscribedTopics) { + for (const topic of this.topics) { const offsetsResponse = await admin.fetchOffsets({ groupId: this.options.groupId, topics: [topic], diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts index 8d93744..66db989 100644 --- a/client/adapters/KafkaAdapter.ts +++ b/client/adapters/KafkaAdapter.ts @@ -7,100 +7,36 @@ export class KafkaAdapter implements EventAdapter { private consumer: Consumer | null = null; private producer: Producer | null = null; private messageHandler?: (type: string, payload: object) => void; - private subscribedTopics = new Set(); - private isRunning = false; + private readonly topics: string[]; constructor( private readonly options: { clientId: string; brokers: string[]; groupId: string; + topics: string[]; } ) { this.kafka = new Kafka({ clientId: options.clientId, brokers: options.brokers, }); + this.topics = options.topics; } async connect(): Promise { this.producer = this.kafka.producer(); await this.producer.connect(); - } - - async disconnect(): Promise { - if (this.consumer && this.isRunning) { - await this.consumer.stop(); - await this.consumer.disconnect(); - this.consumer = null; - this.isRunning = false; - } - - this.subscribedTopics.clear(); - } - - async publish(type: string, payload: T): Promise { - const producer = this.kafka.producer(); - await producer.connect(); - - try { - await producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - }); - } finally { - await 
producer.disconnect(); - } - } - - async subscribe(type: string): Promise { - if (!this.subscribedTopics.has(type)) { - this.subscribedTopics.add(type); - await this.restartConsumer(); - } - } - - async unsubscribe(type: string): Promise { - this.subscribedTopics.delete(type); - if (this.subscribedTopics.size > 0) { - await this.restartConsumer(); - } else if (this.consumer) { - await this.consumer.stop(); - await this.consumer.disconnect(); - this.consumer = null; - this.isRunning = false; - } - } - - onMessage(handler: (type: string, payload: object) => void): void { - this.messageHandler = handler; - } - private async restartConsumer(): Promise { - if (this.subscribedTopics.size === 0) return; - - if (this.consumer && this.isRunning) { - console.log("Stopping existing Kafka consumer..."); - await this.consumer.stop(); - await this.consumer.disconnect(); - this.isRunning = false; - } - - console.log( - `Starting Kafka consumer with topics: ${Array.from( - this.subscribedTopics - ).join(", ")}` - ); this.consumer = this.kafka.consumer({ groupId: this.options.groupId }); await this.consumer.connect(); await this.consumer.subscribe({ - topics: Array.from(this.subscribedTopics), + topics: this.topics, fromBeginning: false, }); await this.consumer.run({ - partitionsConsumedConcurrently: 1, eachMessage: async ({ topic, message }) => { if (this.messageHandler) { try { @@ -116,18 +52,53 @@ export class KafkaAdapter implements EventAdapter { }, }); - this.isRunning = true; + console.log(`Kafka consumer started with topics: ${this.topics.join(", ")}`); + } + + async disconnect(): Promise { + if (this.consumer) { + await this.consumer.stop(); + await this.consumer.disconnect(); + this.consumer = null; + } + + if (this.producer) { + await this.producer.disconnect(); + this.producer = null; + } + } + + async publish(type: string, payload: T): Promise { + if (!this.producer) { + throw new Error("Producer not connected"); + } + + await this.producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); + } + + async subscribe(type: string): Promise { + // EventManager handles callback registration in memory + } + + async unsubscribe(type: string): Promise { + // EventManager handles callback removal in memory + } + + onMessage(handler: (type: string, payload: object) => void): void { + this.messageHandler = handler; } async getBacklog(): Promise> { const backlogMap = new Map(); - if (this.subscribedTopics.size === 0) return backlogMap; const admin = this.kafka.admin(); await admin.connect(); try { - for (const topic of this.subscribedTopics) { + for (const topic of this.topics) { const offsetsResponse = await admin.fetchOffsets({ groupId: this.options.groupId, topics: [topic], @@ -159,5 +130,4 @@ export class KafkaAdapter implements EventAdapter { return backlogMap; } -} - +} \ No newline at end of file diff --git a/client/eventManager.js b/client/eventManager.js index 5cdda22..2bff5dd 100644 --- a/client/eventManager.js +++ b/client/eventManager.js @@ -4,6 +4,24 @@ exports.EventManager = void 0; const metrics_1 = require("./metrics"); const KafkaAdapter_1 = require("./adapters/KafkaAdapter"); const SocketAdapter_1 = require("./adapters/SocketAdapter"); +const KAFKA_TOPICS = [ + "KNOWLEDGE_CREATED", + "MESSAGE_USER_MESSAGED", + "SESSION_USER_MESSAGED", + "TASK_CREATED", + "STEP_ADDED", + "STEP_COMPLETED", + "MESSAGE_USER_MESSAGED", + "MESSAGE_ASSISTANT_MESSAGED", + "SESSION_INITIATED", + "SESSION_USER_MESSAGED", + "SESSION_AI_MESSAGED", + "SUPERVISING_RAISED", + 
"SUPERVISING_ANSWERED", + "TASK_COMPLETED", + "KNOWLEDGES_LOADED", + "MESSAGES_LOADED", +]; class EventManager { adapter = null; callbacks = new Map(); @@ -26,6 +44,7 @@ class EventManager { clientId: options.clientId, brokers: options.brokers, groupId: options.groupId, + topics: KAFKA_TOPICS, }); this.startBacklogMonitoring(); break; @@ -46,7 +65,7 @@ class EventManager { } const payload = args[args.length - 1]; const type = args.slice(0, -1); - const mergedType = type.join('_'); + const mergedType = type.join("_"); this.validateEventType(mergedType); const payloadSize = JSON.stringify(payload).length; const endTimer = this.metrics.recordPublish(mergedType, payloadSize); @@ -96,8 +115,8 @@ class EventManager { executeCallbacks(type, payload) { const callbackSet = this.callbacks.get(type); if (!callbackSet) - return; - callbackSet.forEach(callback => { + return; // No callbacks for this topic - message ignored + callbackSet.forEach((callback) => { setTimeout(() => { const endTimer = this.metrics.recordCallback(type); try { @@ -111,7 +130,9 @@ class EventManager { }); } validateEventType(type) { - if (type === "__proto__" || type === "constructor" || type === "prototype") { + if (type === "__proto__" || + type === "constructor" || + type === "prototype") { throw new Error("Invalid event type"); } } diff --git a/client/eventManager.ts b/client/eventManager.ts index d5ab678..2b80af0 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -4,6 +4,25 @@ import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; +const KAFKA_TOPICS = [ + "KNOWLEDGE_CREATED", + "MESSAGE_USER_MESSAGED", + "SESSION_USER_MESSAGED", + "TASK_CREATED", + "STEP_ADDED", + "STEP_COMPLETED", + "MESSAGE_USER_MESSAGED", + "MESSAGE_ASSISTANT_MESSAGED", + "SESSION_INITIATED", + "SESSION_USER_MESSAGED", + "SESSION_AI_MESSAGED", + "SUPERVISING_RAISED", + "SUPERVISING_ANSWERED", + "TASK_COMPLETED", + "KNOWLEDGES_LOADED", + "MESSAGES_LOADED", +]; + export class EventManager { private adapter: EventAdapter | null = null; private callbacks: Map> = new Map(); @@ -29,6 +48,7 @@ export class EventManager { clientId: options.clientId, brokers: options.brokers, groupId: options.groupId, + topics: KAFKA_TOPICS, }); this.startBacklogMonitoring(); break; @@ -38,13 +58,15 @@ export class EventManager { } await this.adapter.connect(); - + this.adapter.onMessage((type, payload) => { this.handleIncomingMessage(type, payload); }); } - async publish(...args: [...string[], T]): Promise { + async publish( + ...args: [...string[], T] + ): Promise { if (args.length < 1) { throw new Error("publish requires at least one event type and a payload"); } @@ -52,13 +74,13 @@ export class EventManager { if (!this.adapter) { throw new Error("Event system not initialized"); } - + const payload = args[args.length - 1] as T; const type = args.slice(0, -1) as string[]; - - const mergedType = type.join('_'); + + const mergedType = type.join("_"); this.validateEventType(mergedType); - + const payloadSize = JSON.stringify(payload).length; const endTimer = this.metrics.recordPublish(mergedType, payloadSize); @@ -75,7 +97,10 @@ export class EventManager { } } - async subscribe(type: string, callback: Callback): Promise<() => void> { + async subscribe( + type: string, + callback: Callback + ): Promise<() => void> { if (!this.callbacks.has(type)) { this.callbacks.set(type, new Set()); } @@ -91,7 +116,7 @@ export class EventManager { return async () => { 
callbackSet.delete(callback as Callback); - + if (callbackSet.size === 0) { this.callbacks.delete(type); if (this.adapter) { @@ -120,9 +145,9 @@ export class EventManager { private executeCallbacks(type: string, payload: object): void { const callbackSet = this.callbacks.get(type); - if (!callbackSet) return; + if (!callbackSet) return; // No callbacks for this topic - message ignored - callbackSet.forEach(callback => { + callbackSet.forEach((callback) => { setTimeout(() => { const endTimer = this.metrics.recordCallback(type); try { @@ -136,7 +161,11 @@ export class EventManager { } private validateEventType(type: string): void { - if (type === "__proto__" || type === "constructor" || type === "prototype") { + if ( + type === "__proto__" || + type === "constructor" || + type === "prototype" + ) { throw new Error("Invalid event type"); } } @@ -175,4 +204,4 @@ export class EventManager { async checkBacklog(): Promise { await this.updateBacklogMetrics(); } -} \ No newline at end of file +} diff --git a/package.json b/package.json index b5d1ef3..1537c7d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.22", + "version": "1.1.24", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ From 198c420dbcff1b47843c91c04263f7e433189854 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Wed, 8 Oct 2025 14:51:51 +0300 Subject: [PATCH 18/35] Refactor KafkaAdapter and improve eventManager topic handling Simplifies KafkaAdapter by removing per-topic subscription logic and managing a single consumer for all topics except internal ones. Updates eventManager to use a static list of Kafka topics for backlog monitoring and adapts to the new KafkaAdapter API. This improves efficiency and maintainability by centralizing topic management and reducing redundant consumer restarts. 
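To make the new division of labor concrete, here is a small end-to-end sketch. Topic names and payload shape are illustrative, the import assumes the published entry point re-exports client/index, and the multi-segment publish form relies on the underscore merging introduced in PATCH 14.

import { event } from "node-event-test-package";

async function demo() {
  await event.init({
    type: "kafka",
    clientId: "chat-service",    // illustrative
    brokers: ["localhost:9092"], // illustrative
    groupId: "chat-consumers",   // illustrative
  });

  // subscribe() now only records the callback in EventManager's in-memory
  // map; the adapter call is a no-op because the single consumer already
  // covers every non-internal topic via the /^(?!__).*$/ subscription.
  const unsubscribe = await event.subscribe<{ text: string }>(
    "SESSION_USER_MESSAGED",
    (payload) => console.log("user said:", payload.text)
  );

  // Multi-segment types are merged with "_" before reaching the producer,
  // so this publishes to the SESSION_USER_MESSAGED topic.
  await event.publish("SESSION", "USER_MESSAGED", { text: "hello" });

  // Removing the last callback deletes the map entry; the consumer keeps
  // running, and messages with no registered callback are ignored.
  await unsubscribe();
}

demo().catch(console.error);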
--- client/adapters/KafkaAdapter.ts | 125 ++++++++++++-------------------- client/eventManager.ts | 36 +++++---- 2 files changed, 68 insertions(+), 93 deletions(-) diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts index 8d93744..1100d55 100644 --- a/client/adapters/KafkaAdapter.ts +++ b/client/adapters/KafkaAdapter.ts @@ -7,8 +7,6 @@ export class KafkaAdapter implements EventAdapter { private consumer: Consumer | null = null; private producer: Producer | null = null; private messageHandler?: (type: string, payload: object) => void; - private subscribedTopics = new Set(); - private isRunning = false; constructor( private readonly options: { @@ -26,108 +24,82 @@ export class KafkaAdapter implements EventAdapter { async connect(): Promise { this.producer = this.kafka.producer(); await this.producer.connect(); + this.consumer = this.kafka.consumer({ groupId: this.options.groupId }); + + await this.consumer.connect(); + await this.consumer.subscribe({ + topics: [/^(?!__).*$/], + fromBeginning: false, + }); + await this.consumer.run({ + partitionsConsumedConcurrently: 48, + eachMessage: async ({ topic, message }) => { + if (topic.startsWith("__")) { + return; + } + + if (this.messageHandler) { + try { + const payload = JSON.parse(message.value?.toString() || "{}"); + this.messageHandler(topic, payload); + } catch (error) { + console.error( + `Error processing message for topic ${topic}:`, + error + ); + } + } + }, + }); + console.log(`Kafka consumer connected`); } async disconnect(): Promise { - if (this.consumer && this.isRunning) { + if (this.consumer) { await this.consumer.stop(); await this.consumer.disconnect(); this.consumer = null; - this.isRunning = false; } - - this.subscribedTopics.clear(); + if (this.producer) { + await this.producer.disconnect(); + this.producer = null; + } } async publish(type: string, payload: T): Promise { - const producer = this.kafka.producer(); - await producer.connect(); - - try { - await producer.send({ - topic: type, - messages: [{ value: JSON.stringify(payload) }], - }); - } finally { - await producer.disconnect(); + if (!this.producer) { + throw new Error("Producer not connected"); } + await this.producer.send({ + topic: type, + messages: [{ value: JSON.stringify(payload) }], + }); } async subscribe(type: string): Promise { - if (!this.subscribedTopics.has(type)) { - this.subscribedTopics.add(type); - await this.restartConsumer(); - } + // No-op: EventManager handles callback registration in memory } async unsubscribe(type: string): Promise { - this.subscribedTopics.delete(type); - if (this.subscribedTopics.size > 0) { - await this.restartConsumer(); - } else if (this.consumer) { - await this.consumer.stop(); - await this.consumer.disconnect(); - this.consumer = null; - this.isRunning = false; - } + // No-op: EventManager handles callback removal in memory } onMessage(handler: (type: string, payload: object) => void): void { this.messageHandler = handler; } - private async restartConsumer(): Promise { - if (this.subscribedTopics.size === 0) return; + async getBacklog(topics: string[]): Promise> { + const backlogMap = new Map(); - if (this.consumer && this.isRunning) { - console.log("Stopping existing Kafka consumer..."); - await this.consumer.stop(); - await this.consumer.disconnect(); - this.isRunning = false; + if (topics.length === 0) { + return backlogMap; } - console.log( - `Starting Kafka consumer with topics: ${Array.from( - this.subscribedTopics - ).join(", ")}` - ); - this.consumer = this.kafka.consumer({ groupId: 
this.options.groupId }); - await this.consumer.connect(); - - await this.consumer.subscribe({ - topics: Array.from(this.subscribedTopics), - fromBeginning: false, - }); - - await this.consumer.run({ - partitionsConsumedConcurrently: 1, - eachMessage: async ({ topic, message }) => { - if (this.messageHandler) { - try { - const payload = JSON.parse(message.value?.toString() || "{}"); - this.messageHandler(topic, payload); - } catch (error) { - console.error( - `Error processing message for topic ${topic}:`, - error - ); - } - } - }, - }); - - this.isRunning = true; - } - - async getBacklog(): Promise> { - const backlogMap = new Map(); - if (this.subscribedTopics.size === 0) return backlogMap; - const admin = this.kafka.admin(); await admin.connect(); try { - for (const topic of this.subscribedTopics) { + for (const topic of topics) { const offsetsResponse = await admin.fetchOffsets({ groupId: this.options.groupId, topics: [topic], @@ -136,11 +108,11 @@ export class KafkaAdapter implements EventAdapter { const topicOffsets = await admin.fetchTopicOffsets(topic); let totalLag = 0; - const topicResponse = offsetsResponse.find(r => r.topic === topic); + const topicResponse = offsetsResponse.find((r) => r.topic === topic); if (topicResponse) { topicResponse.partitions.forEach((partitionOffset) => { const latestOffset = topicOffsets.find( - to => to.partition === partitionOffset.partition + (to) => to.partition === partitionOffset.partition ); if (latestOffset) { @@ -160,4 +132,3 @@ export class KafkaAdapter implements EventAdapter { return backlogMap; } } - diff --git a/client/eventManager.ts b/client/eventManager.ts index 8093f18..cdad872 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -4,17 +4,33 @@ import { EventMetrics, PushgatewayConfig } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; +const KAFKA_TOPICS = [ + "KNOWLEDGE_CREATED", + "MESSAGE_USER_MESSAGED", + "SESSION_USER_MESSAGED", + "TASK_CREATED", + "STEP_ADDED", + "STEP_COMPLETED", + "MESSAGE_USER_MESSAGED", + "MESSAGE_ASSISTANT_MESSAGED", + "SESSION_INITIATED", + "SESSION_USER_MESSAGED", + "SESSION_AI_MESSAGED", + "SUPERVISING_RAISED", + "SUPERVISING_ANSWERED", + "TASK_COMPLETED", + "KNOWLEDGES_LOADED", + "MESSAGES_LOADED", +]; export class EventManager { private adapter: EventAdapter | null = null; private callbacks: Map> = new Map(); private metrics = new EventMetrics(); private backlogInterval: NodeJS.Timeout | null = null; - async init(options: InitOptions): Promise { if (this.adapter) { await this.disconnect(); } - switch (options.type) { case "inMemory": this.adapter = new SocketAdapter({ @@ -23,7 +39,6 @@ export class EventManager { protocol: options.protocol, }); break; - case "kafka": this.adapter = new KafkaAdapter({ clientId: options.clientId, @@ -32,43 +47,33 @@ export class EventManager { }); this.startBacklogMonitoring(); break; - default: throw new Error(`Unknown adapter type`); } - await this.adapter.connect(); this.adapter.onMessage((type, payload) => { this.handleIncomingMessage(type, payload); }); } - async publish( ...args: [...string[], T] ): Promise { if (args.length < 1) { throw new Error("publish requires at least one event type and a payload"); } - if (!this.adapter) { throw new Error("Event system not initialized"); } - const payload = args[args.length - 1] as T; const type = args.slice(0, -1) as string[]; - const mergedType = type.join("_"); this.validateEventType(mergedType); - const payloadSize = 
JSON.stringify(payload).length; const endTimer = this.metrics.recordPublish(mergedType, payloadSize); - try { await this.adapter.publish(mergedType, payload); - this.executeCallbacks(mergedType, payload); - endTimer(); } catch (error) { this.metrics.recordPublishError(mergedType, "publish_error"); @@ -76,7 +81,6 @@ export class EventManager { throw error; } } - async subscribe( type: string, callback: Callback @@ -125,7 +129,7 @@ export class EventManager { private executeCallbacks(type: string, payload: object): void { const callbackSet = this.callbacks.get(type); - if (!callbackSet) return; + if (!callbackSet) return; // No callbacks for this topic - message ignored callbackSet.forEach((callback) => { setTimeout(() => { @@ -171,7 +175,7 @@ export class EventManager { if (!(this.adapter instanceof KafkaAdapter)) return; try { - const backlog = await this.adapter.getBacklog(); + const backlog = await this.adapter.getBacklog(KAFKA_TOPICS); backlog.forEach((size, topic) => { this.metrics.updateKafkaBacklog(topic, size); console.log(`Backlog for topic ${topic}: ${size} messages`); From 0423ee6845ab8867f2a7faaed727cc3257a45e3b Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Wed, 8 Oct 2025 15:02:44 +0300 Subject: [PATCH 19/35] Add Pushgateway metrics support and refactor KafkaAdapter Introduces Pushgateway integration for metrics reporting, including methods to start, stop, and push metrics to a Prometheus Pushgateway. Refactors KafkaAdapter to remove internal topic tracking, use regex for topic subscription, and improve backlog calculation. Updates type definitions and public API to expose Pushgateway configuration and control methods. --- client/adapters/KafkaAdapter.d.ts | 3 +- client/adapters/KafkaAdapter.js | 26 +++++++++------- client/adapters/KafkaAdapter.ts | 1 - client/eventManager.d.ts | 5 ++++ client/eventManager.js | 14 ++++++++- client/index.d.ts | 7 ++++- client/index.js | 8 +++-- client/metrics.d.ts | 12 ++++++++ client/metrics.js | 49 +++++++++++++++++++++++++++++++ package.json | 2 +- 10 files changed, 108 insertions(+), 19 deletions(-) diff --git a/client/adapters/KafkaAdapter.d.ts b/client/adapters/KafkaAdapter.d.ts index 076922d..df0e582 100644 --- a/client/adapters/KafkaAdapter.d.ts +++ b/client/adapters/KafkaAdapter.d.ts @@ -5,7 +5,6 @@ export declare class KafkaAdapter implements EventAdapter { private consumer; private producer; private messageHandler?; - private readonly topics; constructor(options: { clientId: string; brokers: string[]; @@ -18,5 +17,5 @@ export declare class KafkaAdapter implements EventAdapter { subscribe(type: string): Promise; unsubscribe(type: string): Promise; onMessage(handler: (type: string, payload: object) => void): void; - getBacklog(): Promise>; + getBacklog(topics: string[]): Promise>; } diff --git a/client/adapters/KafkaAdapter.js b/client/adapters/KafkaAdapter.js index 99ee383..e2c424e 100644 --- a/client/adapters/KafkaAdapter.js +++ b/client/adapters/KafkaAdapter.js @@ -8,14 +8,12 @@ class KafkaAdapter { consumer = null; producer = null; messageHandler; - topics; constructor(options) { this.options = options; this.kafka = new kafkajs_1.Kafka({ clientId: options.clientId, brokers: options.brokers, }); - this.topics = options.topics; } async connect() { this.producer = this.kafka.producer(); @@ -23,12 +21,15 @@ class KafkaAdapter { this.consumer = this.kafka.consumer({ groupId: this.options.groupId }); await this.consumer.connect(); await this.consumer.subscribe({ - topics: this.topics, + topics: [/^(?!__).*$/], fromBeginning: 
false, }); await this.consumer.run({ - partitionsConsumedConcurrently: 1, + partitionsConsumedConcurrently: 48, eachMessage: async ({ topic, message }) => { + if (topic.startsWith("__")) { + return; + } if (this.messageHandler) { try { const payload = JSON.parse(message.value?.toString() || "{}"); @@ -40,7 +41,7 @@ class KafkaAdapter { } }, }); - console.log(`Kafka consumer started with topics: ${this.topics.join(", ")}`); + console.log(`Kafka consumer connected`); } async disconnect() { if (this.consumer) { @@ -63,30 +64,33 @@ class KafkaAdapter { }); } async subscribe(type) { - // EventManager handles callback registration in memory + // No-op: EventManager handles callback registration in memory } async unsubscribe(type) { - // EventManager handles callback removal in memory + // No-op: EventManager handles callback removal in memory } onMessage(handler) { this.messageHandler = handler; } - async getBacklog() { + async getBacklog(topics) { const backlogMap = new Map(); + if (topics.length === 0) { + return backlogMap; + } const admin = this.kafka.admin(); await admin.connect(); try { - for (const topic of this.topics) { + for (const topic of topics) { const offsetsResponse = await admin.fetchOffsets({ groupId: this.options.groupId, topics: [topic], }); const topicOffsets = await admin.fetchTopicOffsets(topic); let totalLag = 0; - const topicResponse = offsetsResponse.find(r => r.topic === topic); + const topicResponse = offsetsResponse.find((r) => r.topic === topic); if (topicResponse) { topicResponse.partitions.forEach((partitionOffset) => { - const latestOffset = topicOffsets.find(to => to.partition === partitionOffset.partition); + const latestOffset = topicOffsets.find((to) => to.partition === partitionOffset.partition); if (latestOffset) { const consumerOffset = parseInt(partitionOffset.offset); const latestOffsetValue = parseInt(latestOffset.offset); diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts index 82ed079..c2e92fd 100644 --- a/client/adapters/KafkaAdapter.ts +++ b/client/adapters/KafkaAdapter.ts @@ -20,7 +20,6 @@ export class KafkaAdapter implements EventAdapter { clientId: options.clientId, brokers: options.brokers, }); - this.topics = options.topics; } async connect(): Promise { diff --git a/client/eventManager.d.ts b/client/eventManager.d.ts index b881ba0..3a67e84 100644 --- a/client/eventManager.d.ts +++ b/client/eventManager.d.ts @@ -1,4 +1,5 @@ import { Callback, InitOptions } from "./types/types"; +import { PushgatewayConfig } from "./metrics"; export declare class EventManager { private adapter; private callbacks; @@ -15,4 +16,8 @@ export declare class EventManager { private stopBacklogMonitoring; private updateBacklogMetrics; checkBacklog(): Promise; + startPushgateway(config?: PushgatewayConfig): void; + stopPushgateway(): void; + pushMetricsToGateway(): Promise; + getPushgatewayConfig(): PushgatewayConfig | undefined; } diff --git a/client/eventManager.js b/client/eventManager.js index 2bff5dd..9f0ba8a 100644 --- a/client/eventManager.js +++ b/client/eventManager.js @@ -154,7 +154,7 @@ class EventManager { if (!(this.adapter instanceof KafkaAdapter_1.KafkaAdapter)) return; try { - const backlog = await this.adapter.getBacklog(); + const backlog = await this.adapter.getBacklog(KAFKA_TOPICS); backlog.forEach((size, topic) => { this.metrics.updateKafkaBacklog(topic, size); console.log(`Backlog for topic ${topic}: ${size} messages`); @@ -167,5 +167,17 @@ class EventManager { async checkBacklog() { await this.updateBacklogMetrics(); } + 
startPushgateway(config) { + this.metrics.startPushgateway(config); + } + stopPushgateway() { + this.metrics.stopPushgateway(); + } + async pushMetricsToGateway() { + await this.metrics.pushMetricsToGateway(); + } + getPushgatewayConfig() { + return this.metrics.getPushgatewayConfig(); + } } exports.EventManager = EventManager; diff --git a/client/index.d.ts b/client/index.d.ts index d22f9dc..ef778e8 100644 --- a/client/index.d.ts +++ b/client/index.d.ts @@ -1,7 +1,7 @@ import * as client from "prom-client"; import { Callback, InitOptions } from "./types/types"; +import { EventMetrics, PushgatewayConfig } from "./metrics"; import { EventManager } from "./eventManager"; -import { EventMetrics } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; export declare const event: { @@ -13,7 +13,12 @@ export declare const event: { startBacklogMonitoring: () => void; stopBacklogMonitoring: () => void; restartKafkaConsumer: () => Promise; + startPushgateway: (config?: PushgatewayConfig) => void; + stopPushgateway: () => void; + pushMetricsToGateway: () => Promise; + getPushgatewayConfig: () => PushgatewayConfig | undefined; }; export { client }; export { EventManager, EventMetrics, SocketAdapter, KafkaAdapter }; +export type { PushgatewayConfig }; export * from "./types/types"; diff --git a/client/index.js b/client/index.js index 6938413..3d61e51 100644 --- a/client/index.js +++ b/client/index.js @@ -39,10 +39,10 @@ Object.defineProperty(exports, "__esModule", { value: true }); exports.KafkaAdapter = exports.SocketAdapter = exports.EventMetrics = exports.EventManager = exports.client = exports.event = void 0; const client = __importStar(require("prom-client")); exports.client = client; -const eventManager_1 = require("./eventManager"); -Object.defineProperty(exports, "EventManager", { enumerable: true, get: function () { return eventManager_1.EventManager; } }); const metrics_1 = require("./metrics"); Object.defineProperty(exports, "EventMetrics", { enumerable: true, get: function () { return metrics_1.EventMetrics; } }); +const eventManager_1 = require("./eventManager"); +Object.defineProperty(exports, "EventManager", { enumerable: true, get: function () { return eventManager_1.EventManager; } }); const KafkaAdapter_1 = require("./adapters/KafkaAdapter"); Object.defineProperty(exports, "KafkaAdapter", { enumerable: true, get: function () { return KafkaAdapter_1.KafkaAdapter; } }); const SocketAdapter_1 = require("./adapters/SocketAdapter"); @@ -63,5 +63,9 @@ exports.event = { restartKafkaConsumer: async () => { console.log("Consumer restart is handled automatically"); }, + startPushgateway: (config) => manager.startPushgateway(config), + stopPushgateway: () => manager.stopPushgateway(), + pushMetricsToGateway: () => manager.pushMetricsToGateway(), + getPushgatewayConfig: () => manager.getPushgatewayConfig(), }; __exportStar(require("./types/types"), exports); diff --git a/client/metrics.d.ts b/client/metrics.d.ts index f751689..4d9dcf9 100644 --- a/client/metrics.d.ts +++ b/client/metrics.d.ts @@ -1,4 +1,12 @@ +export interface PushgatewayConfig { + url?: string; + jobName?: string; + instance?: string; + interval?: number; +} export declare class EventMetrics { + private pushgatewayInterval?; + private pushgatewayConfig?; private readonly publishCounter; private readonly subscriptionGauge; private readonly publishDuration; @@ -12,4 +20,8 @@ export declare class EventMetrics { recordCallback(type: string): () => void; 
updateSubscriptions(type: string, count: number): void; updateKafkaBacklog(topic: string, size: number): void; + startPushgateway(config?: PushgatewayConfig): void; + stopPushgateway(): void; + pushMetricsToGateway(): Promise; + getPushgatewayConfig(): PushgatewayConfig | undefined; } diff --git a/client/metrics.js b/client/metrics.js index 41ecd1b..448a593 100644 --- a/client/metrics.js +++ b/client/metrics.js @@ -36,6 +36,8 @@ Object.defineProperty(exports, "__esModule", { value: true }); exports.EventMetrics = void 0; const client = __importStar(require("prom-client")); class EventMetrics { + pushgatewayInterval; + pushgatewayConfig; publishCounter = new client.Counter({ name: "events_published_total", help: "Total number of events published", @@ -97,5 +99,52 @@ class EventMetrics { updateKafkaBacklog(topic, size) { this.kafkaBacklog.labels(topic).set(size); } + startPushgateway(config = {}) { + this.pushgatewayConfig = { + url: config.url || "http://localhost:9091", + jobName: config.jobName || "node_events", + instance: config.instance || "default_instance", + interval: config.interval || 15000, + }; + this.stopPushgateway(); + this.pushgatewayInterval = setInterval(() => { + this.pushMetricsToGateway(); + }, this.pushgatewayConfig.interval); + console.log(`Started pushing metrics to Pushgateway every ${this.pushgatewayConfig.interval}ms`); + } + stopPushgateway() { + if (this.pushgatewayInterval) { + clearInterval(this.pushgatewayInterval); + this.pushgatewayInterval = undefined; + console.log("Stopped pushing metrics to Pushgateway"); + } + } + async pushMetricsToGateway() { + if (!this.pushgatewayConfig) { + throw new Error("Pushgateway not configured. Call startPushgateway() first."); + } + try { + const body = await client.register.metrics(); + let url = `${this.pushgatewayConfig.url}/metrics/job/${this.pushgatewayConfig.jobName}`; + if (this.pushgatewayConfig.instance) { + url += `/instance/${this.pushgatewayConfig.instance}`; + } + const response = await fetch(url, { + method: "POST", + headers: { "Content-Type": "text/plain" }, + body, + }); + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + console.log("Metrics pushed to Pushgateway successfully"); + } + catch (err) { + console.error("Failed to push metrics to Pushgateway:", err); + } + } + getPushgatewayConfig() { + return this.pushgatewayConfig; + } } exports.EventMetrics = EventMetrics; diff --git a/package.json b/package.json index 1537c7d..2d0d195 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.24", + "version": "1.1.30", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ From 101a339dd6859c328b0ea63e3902024a484de8b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Thu, 9 Oct 2025 12:23:04 +0300 Subject: [PATCH 20/35] Add Oracle TxEventQ adapter and integration Introduces TxEventQAdapter for Oracle Advanced Queuing event transport, updates EventManager and types to support 'txeventq', and exposes the adapter in the public API. Adds docker-compose-oracle.yml for local Oracle DB setup and includes 'oracledb' as a dependency. 
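For reviewers, a minimal initialization sketch (all values are placeholders; the option keys mirror the TxEventQOptions type added in this patch, and the connect string assumes the 1522 -> 1521 port mapping from docker-compose-oracle.yml):

    const { event } = require("./client");

    (async () => {
      await event.init({
        type: "txeventq",
        connectString: "localhost:1522/FREEPDB1",
        user: "txeventq_user",            // placeholder credentials
        password: "OraclePassword123",
        queueName: "EVENT_QUEUE",         // single static queue at this stage
        consumerName: "event_subscriber", // optional
        batchSize: 1,                     // 1 => deqOne, > 1 => deqMany
        waitTime: 1000,                   // dequeue wait (ms) when batchSize is 1
      });
    })();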
--- client/adapters/TxEventQAdapter.ts | 207 +++++++++++++++++++++++++++++ client/eventManager.ts | 17 ++- client/index.ts | 11 +- client/types/types.ts | 16 ++- docker-compose-oracle.yml | 37 ++++++ package-lock.json | 18 ++- package.json | 1 + 7 files changed, 299 insertions(+), 8 deletions(-) create mode 100644 client/adapters/TxEventQAdapter.ts create mode 100644 docker-compose-oracle.yml diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts new file mode 100644 index 0000000..2cc7e88 --- /dev/null +++ b/client/adapters/TxEventQAdapter.ts @@ -0,0 +1,207 @@ +import * as oracledb from "oracledb"; + +import { EventAdapter } from "../types/types"; +import { EventMessage } from "../models/EventMessage"; + +export class TxEventQAdapter implements EventAdapter { + private connection: oracledb.Connection | null = null; + private queue: oracledb.AdvancedQueue | null = null; + private messageHandler?: (type: string, payload: object) => void; + private isRunning: boolean = false; + private subscriptionLoop: Promise | null = null; + + constructor( + private readonly options: { + connectString: string; + user: string; + password: string; + queueName: string; + instantClientPath?: string; + consumerName?: string; + batchSize?: number; + waitTime?: number; + } + ) {} + + async connect(): Promise { + try { + if (this.options.instantClientPath && oracledb.thin) { + try { + oracledb.initOracleClient({ + libDir: this.options.instantClientPath, + }); + console.log("Oracle Thick client initialized"); + } catch (initError: any) { + if (initError.code !== "NJS-509") { + throw initError; + } + console.log("Oracle Thick client already initialized"); + } + } + + this.connection = await oracledb.getConnection({ + connectString: this.options.connectString, + user: this.options.user, + password: this.options.password, + }); + + this.queue = await this.connection.getQueue( + this.options.queueName, + { + payloadType: oracledb.DB_TYPE_JSON, + } as any + ); + + const batchSize = this.options.batchSize || 1; + const waitTime = this.options.waitTime || 1000; + + this.queue.deqOptions.wait = + batchSize > 1 ? 
oracledb.AQ_DEQ_NO_WAIT : waitTime; + + if (this.options.consumerName) { + this.queue.deqOptions.consumerName = this.options.consumerName; + } + + this.isRunning = true; + this.subscriptionLoop = this.startConsumption(); + + console.log("TxEventQ adapter connected successfully"); + } catch (error: any) { + console.error("Failed to connect to TxEventQ:", error.message); + throw error; + } + } + + async disconnect(): Promise { + this.isRunning = false; + if (this.subscriptionLoop) { + await this.subscriptionLoop; + this.subscriptionLoop = null; + } + + if (this.connection) { + try { + await this.connection.close(); + console.log("TxEventQ connection closed"); + } catch (error) { + console.error("Error closing TxEventQ connection:", error); + } + this.connection = null; + this.queue = null; + } + } + + async publish(type: string, payload: T): Promise { + if (!this.connection || !this.queue) { + throw new Error("TxEventQAdapter not connected"); + } + + try { + const message: EventMessage = { + eventType: type, + payload: payload, + timestamp: new Date().toISOString(), + userId: (payload as any).userId, + }; + + await this.queue.enqOne({ + payload: message, + correlation: message.userId?.toString() || "unknown", + priority: 0, + delay: 0, + expiration: -1, + exceptionQueue: "", + } as any); + + await this.connection.commit(); + } catch (error: any) { + console.error("Failed to publish event to TxEventQ:", error.message); + throw error; + } + } + + async subscribe(type: string): Promise { + // No-op: EventManager handles callback registration in memory + } + + async unsubscribe(type: string): Promise { + // No-op: EventManager handles callback removal in memory + } + + onMessage(handler: (type: string, payload: object) => void): void { + this.messageHandler = handler; + } + + private async startConsumption(): Promise { + if (!this.connection || !this.queue) { + throw new Error("TxEventQAdapter not initialized"); + } + + console.log("Starting TxEventQ message consumption..."); + + try { + while (this.isRunning) { + try { + let messages: oracledb.AdvancedQueueMessage[] = []; + + const batchSize = this.options.batchSize || 1; + + if (batchSize === 1) { + const message = await this.queue!.deqOne(); + if (message) { + messages = [message]; + } + } else { + const dequeuedMessages = await this.queue!.deqMany(batchSize); + if (dequeuedMessages) { + messages = dequeuedMessages; + } + } + + if (messages && messages.length > 0) { + for (const message of messages) { + const eventData: EventMessage = message.payload as any; + + if (this.messageHandler) { + try { + this.messageHandler(eventData.eventType, eventData.payload); + } catch (error) { + console.error( + `Error processing message for type ${eventData.eventType}:`, + error + ); + } + } + } + + await this.connection!.commit(); + } + } catch (error: any) { + if (error.code === 25228) { + await new Promise((resolve) => setTimeout(resolve, 100)); + continue; + } + + console.error("Error during TxEventQ consumption:", error.message); + + await new Promise((resolve) => setTimeout(resolve, 1000)); + } + } + } catch (error: any) { + console.error("Fatal error during TxEventQ consumption:", error.message); + throw error; + } + + console.log("TxEventQ message consumption stopped"); + } + + async getBacklog(topics: string[]): Promise> { + const backlogMap = new Map(); + + if (topics.length === 0) { + return backlogMap; + } + // TODO: Implement backlog calculation for TxEventQ + return backlogMap; + } +} diff --git a/client/eventManager.ts 
b/client/eventManager.ts index dcb3137..8524692 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -3,6 +3,7 @@ import { EventMetrics, PushgatewayConfig } from "./metrics"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; +import { TxEventQAdapter } from "./adapters/TxEventQAdapter"; const KAFKA_TOPICS = [ "KNOWLEDGE_CREATED", @@ -48,11 +49,25 @@ export class EventManager { }); this.startBacklogMonitoring(); break; + + case "txeventq": + this.adapter = new TxEventQAdapter({ + connectString: options.connectString, + user: options.user, + password: options.password, + queueName: options.queueName, + instantClientPath: options.instantClientPath, + consumerName: options.consumerName, + batchSize: options.batchSize, + waitTime: options.waitTime, + }); + break; + default: throw new Error(`Unknown adapter type`); } await this.adapter.connect(); - + this.adapter.onMessage((type, payload) => { this.handleIncomingMessage(type, payload); }); diff --git a/client/index.ts b/client/index.ts index 44876a6..e3a0613 100644 --- a/client/index.ts +++ b/client/index.ts @@ -6,6 +6,7 @@ import { EventMetrics, PushgatewayConfig } from "./metrics"; import { EventManager } from "./eventManager"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; +import { TxEventQAdapter } from "./adapters/TxEventQAdapter"; const manager = new EventManager(); @@ -37,6 +38,14 @@ export const event = { export { client }; -export { EventManager, EventMetrics, SocketAdapter, KafkaAdapter }; +export { + EventManager, + EventMetrics, + SocketAdapter, + KafkaAdapter, + TxEventQAdapter, +}; export type { PushgatewayConfig }; export * from "./types/types"; +export * from "./models/EventMessage"; +export * from "./models/EventGenerator"; diff --git a/client/types/types.ts b/client/types/types.ts index 4076904..9ea822f 100644 --- a/client/types/types.ts +++ b/client/types/types.ts @@ -10,7 +10,7 @@ export interface EventAdapter { } export interface BaseInitOptions { - type: "inMemory" | "kafka"; + type: "inMemory" | "kafka" | "txeventq"; } export interface InMemoryOptions extends BaseInitOptions { @@ -27,4 +27,16 @@ export interface KafkaOptions extends BaseInitOptions { groupId: string; } -export type InitOptions = InMemoryOptions | KafkaOptions; \ No newline at end of file +export interface TxEventQOptions extends BaseInitOptions { + type: "txeventq"; + connectString: string; + user: string; + password: string; + queueName: string; + instantClientPath?: string; + consumerName?: string; + batchSize?: number; + waitTime?: number; +} + +export type InitOptions = InMemoryOptions | KafkaOptions | TxEventQOptions; diff --git a/docker-compose-oracle.yml b/docker-compose-oracle.yml new file mode 100644 index 0000000..378ef2c --- /dev/null +++ b/docker-compose-oracle.yml @@ -0,0 +1,37 @@ +services: + oracle-db: + image: container-registry.oracle.com/database/free:latest + container_name: oracle-txeventq-poc-db + ports: + - "1522:1521" + - "5501:5500" + environment: + - ORACLE_PWD=OraclePassword123 + - ORACLE_CHARACTERSET=AL32UTF8 + - ORACLE_EDITION=free + volumes: + - oracle-data:/opt/oracle/oradata + - ./sql-start-up-scripts:/opt/oracle/scripts/startup + networks: + - oracle-network + healthcheck: + test: + [ + "CMD", + "sqlplus", + "-L", + "system/OraclePassword123@//localhost:1521/FREEPDB1", + "@/dev/null", + ] + interval: 30s + timeout: 10s + retries: 5 + start_period: 60s + +volumes: + oracle-data: + 
driver: local + +networks: + oracle-network: + driver: bridge diff --git a/package-lock.json b/package-lock.json index a634ac6..729a83a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,15 +1,16 @@ { - "name": "@nucleoidai/node-event", - "version": "1.1.5", + "name": "node-event-test-package", + "version": "1.1.22", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "@nucleoidai/node-event", - "version": "1.1.5", + "name": "node-event-test-package", + "version": "1.1.22", "dependencies": { "chalk": "^4.1.2", "kafkajs": "^2.2.4", + "oracledb": "^6.9.0", "prom-client": "^15.1.3", "socket.io": "^4.8.1", "socket.io-client": "^4.8.1", @@ -4462,6 +4463,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/oracledb": { + "version": "6.9.0", + "resolved": "https://registry.npmjs.org/oracledb/-/oracledb-6.9.0.tgz", + "integrity": "sha512-NwPbIGPv6m0GTFSbyy4/5WEjsKMiiJRxztLmYUcfD3oyh/uXdmVmKOwEWr84wFwWJ/0wQrYQh4PjnzvShibRaA==", + "hasInstallScript": true, + "engines": { + "node": ">=14.17" + } + }, "node_modules/p-limit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", diff --git a/package.json b/package.json index 2d0d195..f8e5655 100644 --- a/package.json +++ b/package.json @@ -21,6 +21,7 @@ "socket.io": "^4.8.1", "socket.io-client": "^4.8.1", "uuid": "^9.0.0" + "oracledb": "^6.9.0" }, "devDependencies": { "@babel/preset-env": "^7.20.2", From f133d228e76cef25eab5184a207087f75e114ba3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Thu, 9 Oct 2025 12:45:00 +0300 Subject: [PATCH 21/35] Update package.json --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index f8e5655..c81a69e 100644 --- a/package.json +++ b/package.json @@ -20,7 +20,7 @@ "prom-client": "^15.1.3", "socket.io": "^4.8.1", "socket.io-client": "^4.8.1", - "uuid": "^9.0.0" + "uuid": "^9.0.0", "oracledb": "^6.9.0" }, "devDependencies": { From f259a8931e5de439862a92c3fda9147628ddc273 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Thu, 9 Oct 2025 18:45:04 +0300 Subject: [PATCH 22/35] Refactor TxEventQAdapter to remove EventMessage type Replaces usage of the EventMessage type with a generic payload structure in TxEventQAdapter. Updates queue and message handling logic to use 'topic' instead of 'eventType', simplifying type usage and removing dependency on EventMessage. 
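The enqueued body is now a plain topic/payload envelope. A sketch of the resulting enqueue call (fields taken from the diff below; `queue` is the adapter's oracledb queue handle):

    const message = { topic: type, payload };

    await queue.enqOne({
      payload: message,
      correlation: type, // correlation now carries the event type
      priority: 0,
      delay: 0,
      expiration: -1,
      exceptionQueue: "",
    });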
--- client/adapters/TxEventQAdapter.ts | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index 2cc7e88..73b93c2 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -1,11 +1,10 @@ import * as oracledb from "oracledb"; import { EventAdapter } from "../types/types"; -import { EventMessage } from "../models/EventMessage"; export class TxEventQAdapter implements EventAdapter { private connection: oracledb.Connection | null = null; - private queue: oracledb.AdvancedQueue | null = null; + private queue: oracledb.AdvancedQueue | null = null; private messageHandler?: (type: string, payload: object) => void; private isRunning: boolean = false; private subscriptionLoop: Promise | null = null; @@ -45,12 +44,9 @@ export class TxEventQAdapter implements EventAdapter { password: this.options.password, }); - this.queue = await this.connection.getQueue( - this.options.queueName, - { - payloadType: oracledb.DB_TYPE_JSON, - } as any - ); + this.queue = await this.connection.getQueue(this.options.queueName, { + payloadType: oracledb.DB_TYPE_JSON, + } as any); const batchSize = this.options.batchSize || 1; const waitTime = this.options.waitTime || 1000; @@ -97,16 +93,14 @@ export class TxEventQAdapter implements EventAdapter { } try { - const message: EventMessage = { - eventType: type, + const message = { + topic: type, payload: payload, - timestamp: new Date().toISOString(), - userId: (payload as any).userId, }; await this.queue.enqOne({ payload: message, - correlation: message.userId?.toString() || "unknown", + correlation: type, priority: 0, delay: 0, expiration: -1, @@ -142,7 +136,7 @@ export class TxEventQAdapter implements EventAdapter { try { while (this.isRunning) { try { - let messages: oracledb.AdvancedQueueMessage[] = []; + let messages: oracledb.AdvancedQueueMessage[] = []; const batchSize = this.options.batchSize || 1; @@ -160,14 +154,14 @@ export class TxEventQAdapter implements EventAdapter { if (messages && messages.length > 0) { for (const message of messages) { - const eventData: EventMessage = message.payload as any; + const messageData = message.payload as any; - if (this.messageHandler) { + if (this.messageHandler && messageData.topic) { try { - this.messageHandler(eventData.eventType, eventData.payload); + this.messageHandler(messageData.topic, messageData.payload); } catch (error) { console.error( - `Error processing message for type ${eventData.eventType}:`, + `Error processing message for topic ${messageData.topic}:`, error ); } From 27c21c402bfec373b642414e33a2ca697f9d58c8 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Fri, 10 Oct 2025 17:03:20 +0300 Subject: [PATCH 23/35] Add TxEventQAdapter for Oracle AQ event queue support Introduces TxEventQAdapter to support Oracle Advanced Queuing (AQ) as an event adapter. Updates EventManager, types, and exports to allow initialization and usage of TxEventQAdapter. Also increases KafkaAdapter's partitionsConsumedConcurrently to 160 and sets a default consumer name for TxEventQAdapter. Updates package version and dependencies. 
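For context on the concurrency change: partitionsConsumedConcurrently caps how many partitions kafkajs processes in parallel inside consumer.run; ordering is still preserved within each partition, and a value of 160 only helps while the consumer is actually assigned that many partitions. Sketch (handler body elided):

    await consumer.run({
      partitionsConsumedConcurrently: 160, // parallelism across partitions
      eachMessage: async ({ topic, message }) => {
        // per-partition processing remains in order
      },
    });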
--- client/adapters/KafkaAdapter.js | 2 +- client/adapters/KafkaAdapter.ts | 2 +- client/adapters/TxEventQAdapter.d.ts | 27 ++++ client/adapters/TxEventQAdapter.js | 204 +++++++++++++++++++++++++++ client/adapters/TxEventQAdapter.ts | 2 + client/eventManager.js | 13 ++ client/index.d.ts | 3 +- client/index.js | 4 +- client/index.ts | 3 +- client/types/types.d.ts | 15 +- package-lock.json | 4 +- package.json | 6 +- 12 files changed, 272 insertions(+), 13 deletions(-) create mode 100644 client/adapters/TxEventQAdapter.d.ts create mode 100644 client/adapters/TxEventQAdapter.js diff --git a/client/adapters/KafkaAdapter.js b/client/adapters/KafkaAdapter.js index e2c424e..eb4a7d4 100644 --- a/client/adapters/KafkaAdapter.js +++ b/client/adapters/KafkaAdapter.js @@ -25,7 +25,7 @@ class KafkaAdapter { fromBeginning: false, }); await this.consumer.run({ - partitionsConsumedConcurrently: 48, + partitionsConsumedConcurrently: 160, eachMessage: async ({ topic, message }) => { if (topic.startsWith("__")) { return; diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts index c2e92fd..868b8c8 100644 --- a/client/adapters/KafkaAdapter.ts +++ b/client/adapters/KafkaAdapter.ts @@ -33,7 +33,7 @@ export class KafkaAdapter implements EventAdapter { fromBeginning: false, }); await this.consumer.run({ - partitionsConsumedConcurrently: 48, + partitionsConsumedConcurrently: 160, eachMessage: async ({ topic, message }) => { if (topic.startsWith("__")) { return; diff --git a/client/adapters/TxEventQAdapter.d.ts b/client/adapters/TxEventQAdapter.d.ts new file mode 100644 index 0000000..59fec99 --- /dev/null +++ b/client/adapters/TxEventQAdapter.d.ts @@ -0,0 +1,27 @@ +import { EventAdapter } from "../types/types"; +export declare class TxEventQAdapter implements EventAdapter { + private readonly options; + private connection; + private queue; + private messageHandler?; + private isRunning; + private subscriptionLoop; + constructor(options: { + connectString: string; + user: string; + password: string; + queueName: string; + instantClientPath?: string; + consumerName?: string; + batchSize?: number; + waitTime?: number; + }); + connect(): Promise; + disconnect(): Promise; + publish(type: string, payload: T): Promise; + subscribe(type: string): Promise; + unsubscribe(type: string): Promise; + onMessage(handler: (type: string, payload: object) => void): void; + private startConsumption; + getBacklog(topics: string[]): Promise>; +} diff --git a/client/adapters/TxEventQAdapter.js b/client/adapters/TxEventQAdapter.js new file mode 100644 index 0000000..c0bbad1 --- /dev/null +++ b/client/adapters/TxEventQAdapter.js @@ -0,0 +1,204 @@ +"use strict"; +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || (function () { + var ownKeys = function(o) { + ownKeys = Object.getOwnPropertyNames || function (o) { + var ar = []; + for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; + return function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); + __setModuleDefault(result, mod); + return result; + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.TxEventQAdapter = void 0; +const oracledb = __importStar(require("oracledb")); +class TxEventQAdapter { + options; + connection = null; + queue = null; + messageHandler; + isRunning = false; + subscriptionLoop = null; + constructor(options) { + this.options = options; + } + async connect() { + try { + if (this.options.instantClientPath && oracledb.thin) { + try { + oracledb.initOracleClient({ + libDir: this.options.instantClientPath, + }); + console.log("Oracle Thick client initialized"); + } + catch (initError) { + if (initError.code !== "NJS-509") { + throw initError; + } + console.log("Oracle Thick client already initialized"); + } + } + this.connection = await oracledb.getConnection({ + connectString: this.options.connectString, + user: this.options.user, + password: this.options.password, + }); + this.queue = await this.connection.getQueue(this.options.queueName, { + payloadType: oracledb.DB_TYPE_JSON, + }); + const batchSize = this.options.batchSize || 1; + const waitTime = this.options.waitTime || 1000; + this.queue.deqOptions.wait = + batchSize > 1 ? 
oracledb.AQ_DEQ_NO_WAIT : waitTime; + if (this.options.consumerName) { + this.queue.deqOptions.consumerName = this.options.consumerName; + } + else { + this.queue.deqOptions.consumerName = "event_subscriber"; + } + this.isRunning = true; + this.subscriptionLoop = this.startConsumption(); + console.log("TxEventQ adapter connected successfully"); + } + catch (error) { + console.error("Failed to connect to TxEventQ:", error.message); + throw error; + } + } + async disconnect() { + this.isRunning = false; + if (this.subscriptionLoop) { + await this.subscriptionLoop; + this.subscriptionLoop = null; + } + if (this.connection) { + try { + await this.connection.close(); + console.log("TxEventQ connection closed"); + } + catch (error) { + console.error("Error closing TxEventQ connection:", error); + } + this.connection = null; + this.queue = null; + } + } + async publish(type, payload) { + if (!this.connection || !this.queue) { + throw new Error("TxEventQAdapter not connected"); + } + try { + const message = { + topic: type, + payload: payload, + }; + await this.queue.enqOne({ + payload: message, + correlation: type, + priority: 0, + delay: 0, + expiration: -1, + exceptionQueue: "", + }); + await this.connection.commit(); + } + catch (error) { + console.error("Failed to publish event to TxEventQ:", error.message); + throw error; + } + } + async subscribe(type) { + // No-op: EventManager handles callback registration in memory + } + async unsubscribe(type) { + // No-op: EventManager handles callback removal in memory + } + onMessage(handler) { + this.messageHandler = handler; + } + async startConsumption() { + if (!this.connection || !this.queue) { + throw new Error("TxEventQAdapter not initialized"); + } + console.log("Starting TxEventQ message consumption..."); + try { + while (this.isRunning) { + try { + let messages = []; + const batchSize = this.options.batchSize || 1; + if (batchSize === 1) { + const message = await this.queue.deqOne(); + if (message) { + messages = [message]; + } + } + else { + const dequeuedMessages = await this.queue.deqMany(batchSize); + if (dequeuedMessages) { + messages = dequeuedMessages; + } + } + if (messages && messages.length > 0) { + for (const message of messages) { + const messageData = message.payload; + if (this.messageHandler && messageData.topic) { + try { + this.messageHandler(messageData.topic, messageData.payload); + } + catch (error) { + console.error(`Error processing message for topic ${messageData.topic}:`, error); + } + } + } + await this.connection.commit(); + } + } + catch (error) { + if (error.code === 25228) { + await new Promise((resolve) => setTimeout(resolve, 100)); + continue; + } + console.error("Error during TxEventQ consumption:", error.message); + await new Promise((resolve) => setTimeout(resolve, 1000)); + } + } + } + catch (error) { + console.error("Fatal error during TxEventQ consumption:", error.message); + throw error; + } + console.log("TxEventQ message consumption stopped"); + } + async getBacklog(topics) { + const backlogMap = new Map(); + if (topics.length === 0) { + return backlogMap; + } + // TODO: Implement backlog calculation for TxEventQ + return backlogMap; + } +} +exports.TxEventQAdapter = TxEventQAdapter; diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index 73b93c2..3f3620d 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -56,6 +56,8 @@ export class TxEventQAdapter implements EventAdapter { if (this.options.consumerName) { 
this.queue.deqOptions.consumerName = this.options.consumerName; + }else { + this.queue.deqOptions.consumerName = "event_subscriber"; } this.isRunning = true; diff --git a/client/eventManager.js b/client/eventManager.js index 9f0ba8a..0867a0d 100644 --- a/client/eventManager.js +++ b/client/eventManager.js @@ -4,6 +4,7 @@ exports.EventManager = void 0; const metrics_1 = require("./metrics"); const KafkaAdapter_1 = require("./adapters/KafkaAdapter"); const SocketAdapter_1 = require("./adapters/SocketAdapter"); +const TxEventQAdapter_1 = require("./adapters/TxEventQAdapter"); const KAFKA_TOPICS = [ "KNOWLEDGE_CREATED", "MESSAGE_USER_MESSAGED", @@ -48,6 +49,18 @@ class EventManager { }); this.startBacklogMonitoring(); break; + case "txeventq": + this.adapter = new TxEventQAdapter_1.TxEventQAdapter({ + connectString: options.connectString, + user: options.user, + password: options.password, + queueName: options.queueName, + instantClientPath: options.instantClientPath, + consumerName: options.consumerName, + batchSize: options.batchSize, + waitTime: options.waitTime, + }); + break; default: throw new Error(`Unknown adapter type`); } diff --git a/client/index.d.ts b/client/index.d.ts index ef778e8..7561f88 100644 --- a/client/index.d.ts +++ b/client/index.d.ts @@ -4,6 +4,7 @@ import { EventMetrics, PushgatewayConfig } from "./metrics"; import { EventManager } from "./eventManager"; import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; +import { TxEventQAdapter } from "./adapters/TxEventQAdapter"; export declare const event: { init: (options: InitOptions) => Promise; publish: (...args: [...string[], T]) => Promise; @@ -19,6 +20,6 @@ export declare const event: { getPushgatewayConfig: () => PushgatewayConfig | undefined; }; export { client }; -export { EventManager, EventMetrics, SocketAdapter, KafkaAdapter }; +export { EventManager, EventMetrics, SocketAdapter, KafkaAdapter, TxEventQAdapter, }; export type { PushgatewayConfig }; export * from "./types/types"; diff --git a/client/index.js b/client/index.js index 3d61e51..f3a4ce8 100644 --- a/client/index.js +++ b/client/index.js @@ -36,7 +36,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) { for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.KafkaAdapter = exports.SocketAdapter = exports.EventMetrics = exports.EventManager = exports.client = exports.event = void 0; +exports.TxEventQAdapter = exports.KafkaAdapter = exports.SocketAdapter = exports.EventMetrics = exports.EventManager = exports.client = exports.event = void 0; const client = __importStar(require("prom-client")); exports.client = client; const metrics_1 = require("./metrics"); @@ -47,6 +47,8 @@ const KafkaAdapter_1 = require("./adapters/KafkaAdapter"); Object.defineProperty(exports, "KafkaAdapter", { enumerable: true, get: function () { return KafkaAdapter_1.KafkaAdapter; } }); const SocketAdapter_1 = require("./adapters/SocketAdapter"); Object.defineProperty(exports, "SocketAdapter", { enumerable: true, get: function () { return SocketAdapter_1.SocketAdapter; } }); +const TxEventQAdapter_1 = require("./adapters/TxEventQAdapter"); +Object.defineProperty(exports, "TxEventQAdapter", { enumerable: true, get: function () { return TxEventQAdapter_1.TxEventQAdapter; } }); const manager = new eventManager_1.EventManager(); exports.event = { init: (options) => 
manager.init(options), diff --git a/client/index.ts b/client/index.ts index e3a0613..0dbacdb 100644 --- a/client/index.ts +++ b/client/index.ts @@ -47,5 +47,4 @@ export { }; export type { PushgatewayConfig }; export * from "./types/types"; -export * from "./models/EventMessage"; -export * from "./models/EventGenerator"; + diff --git a/client/types/types.d.ts b/client/types/types.d.ts index fef2930..367dd14 100644 --- a/client/types/types.d.ts +++ b/client/types/types.d.ts @@ -8,7 +8,7 @@ export interface EventAdapter { onMessage(handler: (type: string, payload: object) => void): void; } export interface BaseInitOptions { - type: "inMemory" | "kafka"; + type: "inMemory" | "kafka" | "txeventq"; } export interface InMemoryOptions extends BaseInitOptions { type: "inMemory"; @@ -22,4 +22,15 @@ export interface KafkaOptions extends BaseInitOptions { brokers: string[]; groupId: string; } -export type InitOptions = InMemoryOptions | KafkaOptions; +export interface TxEventQOptions extends BaseInitOptions { + type: "txeventq"; + connectString: string; + user: string; + password: string; + queueName: string; + instantClientPath?: string; + consumerName?: string; + batchSize?: number; + waitTime?: number; +} +export type InitOptions = InMemoryOptions | KafkaOptions | TxEventQOptions; diff --git a/package-lock.json b/package-lock.json index 729a83a..c103d67 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "node-event-test-package", - "version": "1.1.22", + "version": "1.1.31", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "node-event-test-package", - "version": "1.1.22", + "version": "1.1.31", "dependencies": { "chalk": "^4.1.2", "kafkajs": "^2.2.4", diff --git a/package.json b/package.json index c81a69e..b835464 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.30", + "version": "1.1.33", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ @@ -17,11 +17,11 @@ "dependencies": { "chalk": "^4.1.2", "kafkajs": "^2.2.4", + "oracledb": "^6.9.0", "prom-client": "^15.1.3", "socket.io": "^4.8.1", "socket.io-client": "^4.8.1", - "uuid": "^9.0.0", - "oracledb": "^6.9.0" + "uuid": "^9.0.0" }, "devDependencies": { "@babel/preset-env": "^7.20.2", From 0ed662682e5906c00c31f9cbf1daed7a5b1e9ac3 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Mon, 13 Oct 2025 12:52:14 +0300 Subject: [PATCH 24/35] Refactor TxEventQAdapter to dynamic queue names Removed static queueName from configuration and refactored TxEventQAdapter to use dynamic queue names based on event type for publish/subscribe. Updated related types and EventManager initialization. Added autoCommit option and improved message consumption logic. Also added pub_test.js and test.js for usage examples. 
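Each event type now maps to its own queue (TXEVENTQ_USER.<type> inside the adapter), so the corresponding queues must already exist on the database side (see the sql-start-up-scripts volume in docker-compose-oracle.yml). A subscriber sketch in the style of test.js, with init awaited before subscribing (credentials and paths are placeholders; the matching publisher runs as a separate process, as in pub_test.js):

    const { EventManager } = require("./client");

    const eventManager = new EventManager();

    (async () => {
      await eventManager.init({
        type: "txeventq",
        connectString: "localhost:1522/FREEPDB1",
        user: "txeventq_user",
        password: "pass123",
        autoCommit: true, // commit after each dequeued batch
      });

      await eventManager.subscribe("test", (payload) => {
        console.log(payload);
      });
    })();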
--- client/adapters/TxEventQAdapter.d.ts | 4 +- client/adapters/TxEventQAdapter.js | 111 +++++++++------------- client/adapters/TxEventQAdapter.ts | 135 +++++++++++---------------- client/eventManager.js | 1 - client/eventManager.ts | 1 - client/types/types.d.ts | 1 - client/types/types.ts | 1 - package.json | 2 +- pub_test.js | 18 ++++ test.js | 21 +++++ 10 files changed, 141 insertions(+), 154 deletions(-) create mode 100644 pub_test.js create mode 100644 test.js diff --git a/client/adapters/TxEventQAdapter.d.ts b/client/adapters/TxEventQAdapter.d.ts index 59fec99..7b2109f 100644 --- a/client/adapters/TxEventQAdapter.d.ts +++ b/client/adapters/TxEventQAdapter.d.ts @@ -5,16 +5,15 @@ export declare class TxEventQAdapter implements EventAdapter { private queue; private messageHandler?; private isRunning; - private subscriptionLoop; constructor(options: { connectString: string; user: string; password: string; - queueName: string; instantClientPath?: string; consumerName?: string; batchSize?: number; waitTime?: number; + autoCommit?: boolean; }); connect(): Promise; disconnect(): Promise; @@ -22,6 +21,5 @@ export declare class TxEventQAdapter implements EventAdapter { subscribe(type: string): Promise; unsubscribe(type: string): Promise; onMessage(handler: (type: string, payload: object) => void): void; - private startConsumption; getBacklog(topics: string[]): Promise>; } diff --git a/client/adapters/TxEventQAdapter.js b/client/adapters/TxEventQAdapter.js index c0bbad1..73ed0e4 100644 --- a/client/adapters/TxEventQAdapter.js +++ b/client/adapters/TxEventQAdapter.js @@ -41,7 +41,6 @@ class TxEventQAdapter { queue = null; messageHandler; isRunning = false; - subscriptionLoop = null; constructor(options) { this.options = options; } @@ -66,21 +65,7 @@ class TxEventQAdapter { user: this.options.user, password: this.options.password, }); - this.queue = await this.connection.getQueue(this.options.queueName, { - payloadType: oracledb.DB_TYPE_JSON, - }); - const batchSize = this.options.batchSize || 1; - const waitTime = this.options.waitTime || 1000; - this.queue.deqOptions.wait = - batchSize > 1 ? 
oracledb.AQ_DEQ_NO_WAIT : waitTime; - if (this.options.consumerName) { - this.queue.deqOptions.consumerName = this.options.consumerName; - } - else { - this.queue.deqOptions.consumerName = "event_subscriber"; - } this.isRunning = true; - this.subscriptionLoop = this.startConsumption(); console.log("TxEventQ adapter connected successfully"); } catch (error) { @@ -90,10 +75,6 @@ class TxEventQAdapter { } async disconnect() { this.isRunning = false; - if (this.subscriptionLoop) { - await this.subscriptionLoop; - this.subscriptionLoop = null; - } if (this.connection) { try { await this.connection.close(); @@ -107,10 +88,14 @@ class TxEventQAdapter { } } async publish(type, payload) { - if (!this.connection || !this.queue) { + if (!this.connection) { throw new Error("TxEventQAdapter not connected"); } try { + const queueName = `TXEVENTQ_USER.${type}`; + this.queue = await this.connection.getQueue(queueName, { + payloadType: oracledb.DB_TYPE_JSON, + }); const message = { topic: type, payload: payload, @@ -131,66 +116,58 @@ class TxEventQAdapter { } } async subscribe(type) { - // No-op: EventManager handles callback registration in memory - } - async unsubscribe(type) { - // No-op: EventManager handles callback removal in memory - } - onMessage(handler) { - this.messageHandler = handler; - } - async startConsumption() { - if (!this.connection || !this.queue) { - throw new Error("TxEventQAdapter not initialized"); + if (!this.connection) { + throw new Error("Subscriber not initialized"); } - console.log("Starting TxEventQ message consumption..."); + this.isRunning = true; + const queueName = `TXEVENTQ_USER.${type}`; + this.queue = await this.connection.getQueue(queueName, { + payloadType: oracledb.DB_TYPE_JSON, + }); + this.queue.deqOptions.wait = + this.options.batchSize > 1 + ? 
oracledb.AQ_DEQ_NO_WAIT + : this.options.waitTime || 1000; + this.queue.deqOptions.consumerName = + this.options.consumerName || `${type.toLowerCase()}_subscriber`; try { while (this.isRunning) { - try { - let messages = []; - const batchSize = this.options.batchSize || 1; - if (batchSize === 1) { - const message = await this.queue.deqOne(); - if (message) { - messages = [message]; - } - } - else { - const dequeuedMessages = await this.queue.deqMany(batchSize); - if (dequeuedMessages) { - messages = dequeuedMessages; - } + let messages = []; + if (this.options.batchSize === 1) { + console.log("Using deqOne()"); + const message = await this.queue.deqOne(); + if (message) { + messages = [message]; } - if (messages && messages.length > 0) { - for (const message of messages) { - const messageData = message.payload; - if (this.messageHandler && messageData.topic) { - try { - this.messageHandler(messageData.topic, messageData.payload); - } - catch (error) { - console.error(`Error processing message for topic ${messageData.topic}:`, error); - } - } - } - await this.connection.commit(); + } + else { + const dequeuedMessages = await this.queue.deqMany(this.options.batchSize); + if (dequeuedMessages) { + messages = dequeuedMessages; } } - catch (error) { - if (error.code === 25228) { - await new Promise((resolve) => setTimeout(resolve, 100)); - continue; + if (messages && messages.length > 0) { + if (this.options.autoCommit) { + await this.connection.commit(); + console.log(`Transaction committed for ${messages.length} message(s)`); } - console.error("Error during TxEventQ consumption:", error.message); - await new Promise((resolve) => setTimeout(resolve, 1000)); } } } catch (error) { - console.error("Fatal error during TxEventQ consumption:", error.message); + console.error("Fatal error during consumption:", error.message); throw error; } - console.log("TxEventQ message consumption stopped"); + } + async unsubscribe(type) { + if (!this.connection) { + throw new Error("Subscriber not initialized"); + } + this.isRunning = false; + this.queue = null; + } + onMessage(handler) { + this.messageHandler = handler; } async getBacklog(topics) { const backlogMap = new Map(); diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index 3f3620d..fcb954d 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -7,18 +7,17 @@ export class TxEventQAdapter implements EventAdapter { private queue: oracledb.AdvancedQueue | null = null; private messageHandler?: (type: string, payload: object) => void; private isRunning: boolean = false; - private subscriptionLoop: Promise | null = null; constructor( private readonly options: { connectString: string; user: string; password: string; - queueName: string; instantClientPath?: string; consumerName?: string; - batchSize?: number; + batchSize?: number waitTime?: number; + autoCommit?: boolean; } ) {} @@ -44,24 +43,7 @@ export class TxEventQAdapter implements EventAdapter { password: this.options.password, }); - this.queue = await this.connection.getQueue(this.options.queueName, { - payloadType: oracledb.DB_TYPE_JSON, - } as any); - - const batchSize = this.options.batchSize || 1; - const waitTime = this.options.waitTime || 1000; - - this.queue.deqOptions.wait = - batchSize > 1 ? 
oracledb.AQ_DEQ_NO_WAIT : waitTime; - - if (this.options.consumerName) { - this.queue.deqOptions.consumerName = this.options.consumerName; - }else { - this.queue.deqOptions.consumerName = "event_subscriber"; - } - this.isRunning = true; - this.subscriptionLoop = this.startConsumption(); console.log("TxEventQ adapter connected successfully"); } catch (error: any) { @@ -72,10 +54,6 @@ export class TxEventQAdapter implements EventAdapter { async disconnect(): Promise { this.isRunning = false; - if (this.subscriptionLoop) { - await this.subscriptionLoop; - this.subscriptionLoop = null; - } if (this.connection) { try { @@ -90,11 +68,19 @@ export class TxEventQAdapter implements EventAdapter { } async publish(type: string, payload: T): Promise { - if (!this.connection || !this.queue) { + if (!this.connection) { throw new Error("TxEventQAdapter not connected"); } try { + + const queueName = `TXEVENTQ_USER.${type}`; + + this.queue = await this.connection.getQueue(queueName, { + payloadType: oracledb.DB_TYPE_JSON, + } as any); + + const message = { topic: type, payload: payload, @@ -117,78 +103,69 @@ export class TxEventQAdapter implements EventAdapter { } async subscribe(type: string): Promise { - // No-op: EventManager handles callback registration in memory - } + if (!this.connection) { + throw new Error("Subscriber not initialized"); + } - async unsubscribe(type: string): Promise { - // No-op: EventManager handles callback removal in memory - } + this.isRunning = true; + const queueName = `TXEVENTQ_USER.${type}`; - onMessage(handler: (type: string, payload: object) => void): void { - this.messageHandler = handler; - } + this.queue = await this.connection.getQueue(queueName, { + payloadType: oracledb.DB_TYPE_JSON, + } as any); - private async startConsumption(): Promise { - if (!this.connection || !this.queue) { - throw new Error("TxEventQAdapter not initialized"); - } + this.queue.deqOptions.wait = + this.options.batchSize! > 1 + ? oracledb.AQ_DEQ_NO_WAIT + : this.options.waitTime || 1000; - console.log("Starting TxEventQ message consumption..."); + this.queue.deqOptions.consumerName = + this.options.consumerName || `${type.toLowerCase()}_subscriber`; try { while (this.isRunning) { - try { - let messages: oracledb.AdvancedQueueMessage[] = []; - - const batchSize = this.options.batchSize || 1; - - if (batchSize === 1) { - const message = await this.queue!.deqOne(); - if (message) { - messages = [message]; - } - } else { - const dequeuedMessages = await this.queue!.deqMany(batchSize); - if (dequeuedMessages) { - messages = dequeuedMessages; - } - } + let messages: oracledb.AdvancedQueueMessage[] = []; - if (messages && messages.length > 0) { - for (const message of messages) { - const messageData = message.payload as any; - - if (this.messageHandler && messageData.topic) { - try { - this.messageHandler(messageData.topic, messageData.payload); - } catch (error) { - console.error( - `Error processing message for topic ${messageData.topic}:`, - error - ); - } - } - } - - await this.connection!.commit(); + if (this.options.batchSize === 1) { + const message = await this.queue.deqOne(); + if (message) { + messages = [message]; } - } catch (error: any) { - if (error.code === 25228) { - await new Promise((resolve) => setTimeout(resolve, 100)); - continue; + } else { + const dequeuedMessages = await this.queue.deqMany( + this.options.batchSize! 
+ ); + if (dequeuedMessages) { + messages = dequeuedMessages; } + } - console.error("Error during TxEventQ consumption:", error.message); + if (messages && messages.length > 0) { - await new Promise((resolve) => setTimeout(resolve, 1000)); + if (this.options.autoCommit) { + await this.connection!.commit(); + console.log( + `Transaction committed for ${messages.length} message(s)` + ); + } } } } catch (error: any) { - console.error("Fatal error during TxEventQ consumption:", error.message); + console.error("Fatal error during consumption:", error.message); throw error; } + } - console.log("TxEventQ message consumption stopped"); + async unsubscribe(type: string): Promise { + if (!this.connection) { + throw new Error("Subscriber not initialized"); + } + this.isRunning = false; + this.queue = null; + } + + onMessage(handler: (type: string, payload: object) => void): void { + this.messageHandler = handler; } async getBacklog(topics: string[]): Promise> { diff --git a/client/eventManager.js b/client/eventManager.js index 0867a0d..7e9d0d0 100644 --- a/client/eventManager.js +++ b/client/eventManager.js @@ -54,7 +54,6 @@ class EventManager { connectString: options.connectString, user: options.user, password: options.password, - queueName: options.queueName, instantClientPath: options.instantClientPath, consumerName: options.consumerName, batchSize: options.batchSize, diff --git a/client/eventManager.ts b/client/eventManager.ts index 8524692..deb724a 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -55,7 +55,6 @@ export class EventManager { connectString: options.connectString, user: options.user, password: options.password, - queueName: options.queueName, instantClientPath: options.instantClientPath, consumerName: options.consumerName, batchSize: options.batchSize, diff --git a/client/types/types.d.ts b/client/types/types.d.ts index 367dd14..f526111 100644 --- a/client/types/types.d.ts +++ b/client/types/types.d.ts @@ -27,7 +27,6 @@ export interface TxEventQOptions extends BaseInitOptions { connectString: string; user: string; password: string; - queueName: string; instantClientPath?: string; consumerName?: string; batchSize?: number; diff --git a/client/types/types.ts b/client/types/types.ts index 9ea822f..366abb1 100644 --- a/client/types/types.ts +++ b/client/types/types.ts @@ -32,7 +32,6 @@ export interface TxEventQOptions extends BaseInitOptions { connectString: string; user: string; password: string; - queueName: string; instantClientPath?: string; consumerName?: string; batchSize?: number; diff --git a/package.json b/package.json index b835464..2386e52 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.33", + "version": "1.1.36", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ diff --git a/pub_test.js b/pub_test.js new file mode 100644 index 0000000..2787d17 --- /dev/null +++ b/pub_test.js @@ -0,0 +1,18 @@ +const { EventManager } = require("./client"); + +const eventManager = new EventManager(); + +eventManager.init({ + type: "txeventq", + connectString: "localhost:1522/FREEPDB1", + user: "txeventq_user", + password: "pass123", + instantClientPath: + "C:\\Users\\Halil\\Downloads\\instantclient-basic-windows.x64-23.9.0.25.07\\instantclient_23_9", + autoCommit: true, +}); + +eventManager.publish("test", { + message: "Hello, world!", +}); + diff --git a/test.js b/test.js new file mode 100644 index 0000000..9ae3bcb --- /dev/null +++ b/test.js @@ -0,0 +1,21 @@ +const { 
EventManager } = require("./client"); + +const eventManager = new EventManager(); + +(async () => { + await eventManager.init({ + type: "txeventq", + connectString: "localhost:1522/FREEPDB1", + user: "txeventq_user", + password: "pass123", + instantClientPath: + "C:\\Users\\Halil\\Downloads\\instantclient-basic-windows.x64-23.9.0.25.07\\instantclient_23_9", + autoCommit: true, + }); + + await eventManager.subscribe("test", (payload) => { + console.log(payload); + }); +})(); From 18c66629373d099e3fb3ab20cccee5e99263da9c Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Mon, 13 Oct 2025 17:46:29 +0300 Subject: [PATCH 25/35] Refactor TxEventQAdapter to always use deqOne Simplified message dequeuing logic in TxEventQAdapter by removing batch processing and always using deqOne with a fixed wait time of 5000ms. Improved error handling and message processing, and updated package version to 1.1.38. --- client/adapters/TxEventQAdapter.js | 32 ++++++++-------- client/adapters/TxEventQAdapter.ts | 59 ++++++++++++++---------------- package.json | 2 +- 3 files changed, 43 insertions(+), 50 deletions(-) diff --git a/client/adapters/TxEventQAdapter.js b/client/adapters/TxEventQAdapter.js index 73ed0e4..eec30f7 100644 --- a/client/adapters/TxEventQAdapter.js +++ b/client/adapters/TxEventQAdapter.js @@ -124,29 +124,27 @@ class TxEventQAdapter { this.queue = await this.connection.getQueue(queueName, { payloadType: oracledb.DB_TYPE_JSON, }); - this.queue.deqOptions.wait = - this.options.batchSize > 1 - ? oracledb.AQ_DEQ_NO_WAIT - : this.options.waitTime || 1000; + this.queue.deqOptions.wait = 5000; this.queue.deqOptions.consumerName = this.options.consumerName || `${type.toLowerCase()}_subscriber`; try { while (this.isRunning) { let messages = []; - if (this.options.batchSize === 1) { - console.log("Using deqOne()"); - const message = await this.queue.deqOne(); - if (message) { - messages = [message]; - } - } - else { - const dequeuedMessages = await this.queue.deqMany(this.options.batchSize); - if (dequeuedMessages) { - messages = dequeuedMessages; - } + const message = await this.queue.deqOne(); + if (message) { + messages = [message]; } if (messages && messages.length > 0) { + if (this.messageHandler) { + try { + const payload = message.payload.payload || {}; + console.log("test-payload", payload); + this.messageHandler(type, payload); + } + catch (error) { + console.error(`Error processing message for topic ${type}:`, error); + } + } if (this.options.autoCommit) { await this.connection.commit(); console.log(`Transaction committed for ${messages.length} message(s)`); @@ -155,7 +153,7 @@ class TxEventQAdapter { } } catch (error) { - console.error("Fatal error during consumption:", error.message); + console.error("Fatal error during consumption:", error); throw error; } } diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index fcb954d..05d6356 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -15,7 +15,7 @@ export class TxEventQAdapter implements EventAdapter { password: string; instantClientPath?: string; consumerName?: string; - batchSize?: number + batchSize?: number; waitTime?: number; autoCommit?: boolean; } @@ -73,14 +73,12 @@ export class TxEventQAdapter implements EventAdapter { } try { - const queueName = `TXEVENTQ_USER.${type}`; this.queue = await this.connection.getQueue(queueName, { payloadType: oracledb.DB_TYPE_JSON, } as any); - const message = { topic: type, payload: payload, @@ 
-106,52 +104,48 @@ export class TxEventQAdapter implements EventAdapter { if (!this.connection) { throw new Error("Subscriber not initialized"); } - this.isRunning = true; - const queueName = `TXEVENTQ_USER.${type}`; + const queueName = `TXEVENTQ_USER.${type}`; + this.queue = await this.connection.getQueue(queueName, { payloadType: oracledb.DB_TYPE_JSON, - } as any); - - this.queue.deqOptions.wait = - this.options.batchSize! > 1 - ? oracledb.AQ_DEQ_NO_WAIT - : this.options.waitTime || 1000; - + }); + + this.queue.deqOptions.wait = 5000; this.queue.deqOptions.consumerName = - this.options.consumerName || `${type.toLowerCase()}_subscriber`; - + this.options.consumerName || `${type.toLowerCase()}_subscriber`; try { while (this.isRunning) { let messages: oracledb.AdvancedQueueMessage[] = []; - - if (this.options.batchSize === 1) { - const message = await this.queue.deqOne(); - if (message) { - messages = [message]; - } - } else { - const dequeuedMessages = await this.queue.deqMany( - this.options.batchSize! - ); - if (dequeuedMessages) { - messages = dequeuedMessages; - } + + const message = await this.queue.deqOne(); + if (message) { + messages = [message]; } - if (messages && messages.length > 0) { - + if (this.messageHandler) { + try { + const payload = message.payload.payload || {}; + console.log("test-payload", payload); + this.messageHandler(type, payload); + } catch (error) { + console.error( + `Error processing message for topic ${type}:`, + error + ); + } + } if (this.options.autoCommit) { - await this.connection!.commit(); + await this.connection.commit(); console.log( `Transaction committed for ${messages.length} message(s)` ); } } } - } catch (error: any) { - console.error("Fatal error during consumption:", error.message); + } catch (error) { + console.error("Fatal error during consumption:", error); throw error; } } @@ -178,3 +172,4 @@ export class TxEventQAdapter implements EventAdapter { return backlogMap; } } + diff --git a/package.json b/package.json index 2386e52..4ad0680 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.36", + "version": "1.1.38", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ From 1214cccf06f9f6f25df1c4f5ffd93e65b35d4ac3 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Mon, 13 Oct 2025 18:06:33 +0300 Subject: [PATCH 26/35] Remove debug log from TxEventQAdapter Eliminated a console.log statement used for debugging payloads in the message handler. This cleans up console output during normal operation. --- client/adapters/TxEventQAdapter.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index 05d6356..22fd109 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -127,7 +127,6 @@ export class TxEventQAdapter implements EventAdapter { if (this.messageHandler) { try { const payload = message.payload.payload || {}; - console.log("test-payload", payload); this.messageHandler(type, payload); } catch (error) { console.error( From 1686999f3fc9e1b9dbf23abd5dc735ecc3e35a8d Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Tue, 14 Oct 2025 14:41:52 +0300 Subject: [PATCH 27/35] Refactor metrics to use dedicated registry Introduces a dedicated client.Registry for EventMetrics and registers all metrics to it. This improves encapsulation and avoids polluting the global registry, ensuring metrics are scoped to the EventMetrics instance. 
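One review note: metrics registered on an instance registry no longer show up in prom-client's global register, so any scrape endpoint has to export this instance's registry. A sketch, assuming a getRegistry() accessor that this patch does not add (the registry stays private here):

    const http = require("http");
    const { EventMetrics } = require("./client");

    const metrics = new EventMetrics();

    http
      .createServer(async (req, res) => {
        // getRegistry() is hypothetical; it would expose the private Registry
        res.setHeader("Content-Type", "text/plain");
        res.end(await metrics.getRegistry().metrics());
      })
      .listen(9100);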
--- client/metrics.ts | 126 +++++++++++++++++++++++++++------------------- 1 file changed, 74 insertions(+), 52 deletions(-) diff --git a/client/metrics.ts b/client/metrics.ts index f28211e..cd5592a 100644 --- a/client/metrics.ts +++ b/client/metrics.ts @@ -8,59 +8,81 @@ export interface PushgatewayConfig { } export class EventMetrics { + private readonly registry: client.Registry; private pushgatewayInterval?: NodeJS.Timeout; private pushgatewayConfig?: PushgatewayConfig; - private readonly publishCounter = new client.Counter({ - name: "events_published_total", - help: "Total number of events published", - labelNames: ["event_type"], - }); - - private readonly subscriptionGauge = new client.Gauge({ - name: "active_event_subscriptions", - help: "Number of active event subscriptions", - labelNames: ["event_type"], - }); - - private readonly publishDuration = new client.Histogram({ - name: "event_publish_duration_seconds", - help: "Time taken to publish events", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], - }); - - private readonly payloadSize = new client.Histogram({ - name: "event_payload_size_bytes", - help: "Size of event payloads in bytes", - labelNames: ["event_type"], - buckets: [10, 100, 1000, 10000, 100000, 1000000], - }); - - private readonly publishErrors = new client.Counter({ - name: "event_publish_errors_total", - help: "Total number of event publish errors", - labelNames: ["event_type", "error_type"], - }); - - private readonly callbackDuration = new client.Histogram({ - name: "event_callback_duration_seconds", - help: "Time taken to process event callbacks", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], - }); - - private readonly throughput = new client.Counter({ - name: "event_callbacks_processed_total", - help: "Total number of event callbacks processed successfully", - labelNames: ["event_type"], - }); - - private readonly kafkaBacklog = new client.Gauge({ - name: "kafka_backlog_events_total", - help: "Total number of events waiting to be processed", - labelNames: ["topic"], - }); + private readonly publishCounter: client.Counter; + private readonly subscriptionGauge: client.Gauge; + private readonly publishDuration: client.Histogram; + private readonly payloadSize: client.Histogram; + private readonly publishErrors: client.Counter; + private readonly callbackDuration: client.Histogram; + private readonly throughput: client.Counter; + private readonly kafkaBacklog: client.Gauge; + + constructor() { + this.registry = new client.Registry(); + + this.publishCounter = new client.Counter({ + name: "events_published_total", + help: "Total number of events published", + labelNames: ["event_type"], + registers: [this.registry], + }); + + this.subscriptionGauge = new client.Gauge({ + name: "active_event_subscriptions", + help: "Number of active event subscriptions", + labelNames: ["event_type"], + registers: [this.registry], + }); + + this.publishDuration = new client.Histogram({ + name: "event_publish_duration_seconds", + help: "Time taken to publish events", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + registers: [this.registry], + }); + + this.payloadSize = new client.Histogram({ + name: "event_payload_size_bytes", + help: "Size of event payloads in bytes", + labelNames: ["event_type"], + buckets: [10, 100, 1000, 10000, 100000, 1000000], + registers: [this.registry], + }); + + this.publishErrors = new client.Counter({ + name: "event_publish_errors_total", + 
help: "Total number of event publish errors", + labelNames: ["event_type", "error_type"], + registers: [this.registry], + }); + + this.callbackDuration = new client.Histogram({ + name: "event_callback_duration_seconds", + help: "Time taken to process event callbacks", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + registers: [this.registry], + }); + + this.throughput = new client.Counter({ + name: "event_callbacks_processed_total", + help: "Total number of event callbacks processed successfully", + labelNames: ["event_type"], + registers: [this.registry], + }); + + this.kafkaBacklog = new client.Gauge({ + name: "kafka_backlog_events_total", + help: "Total number of events waiting to be processed", + labelNames: ["topic"], + registers: [this.registry], + }); + } recordPublish(type: string, payloadSizeBytes: number): () => void { this.publishCounter.labels(type).inc(); @@ -120,7 +142,7 @@ export class EventMetrics { } try { - const body = await client.register.metrics(); + const body = await this.registry.metrics(); let url = `${this.pushgatewayConfig.url}/metrics/job/${this.pushgatewayConfig.jobName}`; if (this.pushgatewayConfig.instance) { @@ -146,4 +168,4 @@ export class EventMetrics { getPushgatewayConfig(): PushgatewayConfig | undefined { return this.pushgatewayConfig; } -} +} \ No newline at end of file From c670caa3ad07a33d64d76fe64d18ae53132d3ad4 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Tue, 14 Oct 2025 14:48:24 +0300 Subject: [PATCH 28/35] Refactor EventMetrics to use dedicated registry EventMetrics now uses its own prom-client Registry for metric registration and export, improving isolation and compatibility with multiple metric sources. Also removed a debug log from TxEventQAdapter and bumped package version to 1.1.39. 
--- client/adapters/TxEventQAdapter.js | 1 - client/metrics.d.ts | 2 + client/metrics.js | 108 +++++++++++++++++------------ package.json | 2 +- 4 files changed, 67 insertions(+), 46 deletions(-) diff --git a/client/adapters/TxEventQAdapter.js b/client/adapters/TxEventQAdapter.js index eec30f7..3c90617 100644 --- a/client/adapters/TxEventQAdapter.js +++ b/client/adapters/TxEventQAdapter.js @@ -138,7 +138,6 @@ class TxEventQAdapter { if (this.messageHandler) { try { const payload = message.payload.payload || {}; - console.log("test-payload", payload); this.messageHandler(type, payload); } catch (error) { diff --git a/client/metrics.d.ts b/client/metrics.d.ts index 4d9dcf9..ff344f8 100644 --- a/client/metrics.d.ts +++ b/client/metrics.d.ts @@ -5,6 +5,7 @@ export interface PushgatewayConfig { interval?: number; } export declare class EventMetrics { + private readonly registry; private pushgatewayInterval?; private pushgatewayConfig?; private readonly publishCounter; @@ -15,6 +16,7 @@ export declare class EventMetrics { private readonly callbackDuration; private readonly throughput; private readonly kafkaBacklog; + constructor(); recordPublish(type: string, payloadSizeBytes: number): () => void; recordPublishError(type: string, errorType: string): void; recordCallback(type: string): () => void; diff --git a/client/metrics.js b/client/metrics.js index 448a593..b0582a0 100644 --- a/client/metrics.js +++ b/client/metrics.js @@ -36,51 +36,71 @@ Object.defineProperty(exports, "__esModule", { value: true }); exports.EventMetrics = void 0; const client = __importStar(require("prom-client")); class EventMetrics { + registry; pushgatewayInterval; pushgatewayConfig; - publishCounter = new client.Counter({ - name: "events_published_total", - help: "Total number of events published", - labelNames: ["event_type"], - }); - subscriptionGauge = new client.Gauge({ - name: "active_event_subscriptions", - help: "Number of active event subscriptions", - labelNames: ["event_type"], - }); - publishDuration = new client.Histogram({ - name: "event_publish_duration_seconds", - help: "Time taken to publish events", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], - }); - payloadSize = new client.Histogram({ - name: "event_payload_size_bytes", - help: "Size of event payloads in bytes", - labelNames: ["event_type"], - buckets: [10, 100, 1000, 10000, 100000, 1000000], - }); - publishErrors = new client.Counter({ - name: "event_publish_errors_total", - help: "Total number of event publish errors", - labelNames: ["event_type", "error_type"], - }); - callbackDuration = new client.Histogram({ - name: "event_callback_duration_seconds", - help: "Time taken to process event callbacks", - labelNames: ["event_type"], - buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], - }); - throughput = new client.Counter({ - name: "event_callbacks_processed_total", - help: "Total number of event callbacks processed successfully", - labelNames: ["event_type"], - }); - kafkaBacklog = new client.Gauge({ - name: "kafka_backlog_events_total", - help: "Total number of events waiting to be processed", - labelNames: ["topic"], - }); + publishCounter; + subscriptionGauge; + publishDuration; + payloadSize; + publishErrors; + callbackDuration; + throughput; + kafkaBacklog; + constructor() { + this.registry = new client.Registry(); + this.publishCounter = new client.Counter({ + name: "events_published_total", + help: "Total number of events published", + labelNames: ["event_type"], + registers: [this.registry], + }); 
+ this.subscriptionGauge = new client.Gauge({ + name: "active_event_subscriptions", + help: "Number of active event subscriptions", + labelNames: ["event_type"], + registers: [this.registry], + }); + this.publishDuration = new client.Histogram({ + name: "event_publish_duration_seconds", + help: "Time taken to publish events", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + registers: [this.registry], + }); + this.payloadSize = new client.Histogram({ + name: "event_payload_size_bytes", + help: "Size of event payloads in bytes", + labelNames: ["event_type"], + buckets: [10, 100, 1000, 10000, 100000, 1000000], + registers: [this.registry], + }); + this.publishErrors = new client.Counter({ + name: "event_publish_errors_total", + help: "Total number of event publish errors", + labelNames: ["event_type", "error_type"], + registers: [this.registry], + }); + this.callbackDuration = new client.Histogram({ + name: "event_callback_duration_seconds", + help: "Time taken to process event callbacks", + labelNames: ["event_type"], + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + registers: [this.registry], + }); + this.throughput = new client.Counter({ + name: "event_callbacks_processed_total", + help: "Total number of event callbacks processed successfully", + labelNames: ["event_type"], + registers: [this.registry], + }); + this.kafkaBacklog = new client.Gauge({ + name: "kafka_backlog_events_total", + help: "Total number of events waiting to be processed", + labelNames: ["topic"], + registers: [this.registry], + }); + } recordPublish(type, payloadSizeBytes) { this.publishCounter.labels(type).inc(); this.payloadSize.labels(type).observe(payloadSizeBytes); @@ -124,7 +144,7 @@ class EventMetrics { throw new Error("Pushgateway not configured. 
Call startPushgateway() first."); } try { - const body = await client.register.metrics(); + const body = await this.registry.metrics(); let url = `${this.pushgatewayConfig.url}/metrics/job/${this.pushgatewayConfig.jobName}`; if (this.pushgatewayConfig.instance) { url += `/instance/${this.pushgatewayConfig.instance}`; diff --git a/package.json b/package.json index 4ad0680..bb85511 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "node-event-test-package", - "version": "1.1.38", + "version": "1.1.39", "description": "Event-driven Message Broker", "main": "index.js", "keywords": [ From 45783bb2717940bc62279a35dcf1b8693ad9b4fd Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Thu, 16 Oct 2025 19:24:15 +0300 Subject: [PATCH 29/35] Add new structure --- client/adapters/TxEventQAdapter.d.ts | 3 +++ client/adapters/TxEventQAdapter.js | 27 +++++++++++++++++--- client/adapters/TxEventQAdapter.ts | 37 ++++++++++++++++++++++++---- client/eventManager.js | 9 ++++--- client/eventManager.ts | 10 +++++--- client/types/types.d.ts | 2 ++ client/types/types.ts | 2 ++ package-lock.json | 22 +++++++++++++++-- package.json | 1 + 9 files changed, 97 insertions(+), 16 deletions(-) diff --git a/client/adapters/TxEventQAdapter.d.ts b/client/adapters/TxEventQAdapter.d.ts index 7b2109f..2ae3eb9 100644 --- a/client/adapters/TxEventQAdapter.d.ts +++ b/client/adapters/TxEventQAdapter.d.ts @@ -3,6 +3,7 @@ export declare class TxEventQAdapter implements EventAdapter { private readonly options; private connection; private queue; + private queueCache; private messageHandler?; private isRunning; constructor(options: { @@ -10,6 +11,7 @@ export declare class TxEventQAdapter implements EventAdapter { user: string; password: string; instantClientPath?: string; + walletPath?: string; consumerName?: string; batchSize?: number; waitTime?: number; @@ -17,6 +19,7 @@ export declare class TxEventQAdapter implements EventAdapter { }); connect(): Promise; disconnect(): Promise; + private getOrCreateQueue; publish(type: string, payload: T): Promise; subscribe(type: string): Promise; unsubscribe(type: string): Promise; diff --git a/client/adapters/TxEventQAdapter.js b/client/adapters/TxEventQAdapter.js index 3c90617..31f665f 100644 --- a/client/adapters/TxEventQAdapter.js +++ b/client/adapters/TxEventQAdapter.js @@ -39,6 +39,7 @@ class TxEventQAdapter { options; connection = null; queue = null; + queueCache = new Map(); messageHandler; isRunning = false; constructor(options) { @@ -50,6 +51,8 @@ class TxEventQAdapter { try { oracledb.initOracleClient({ libDir: this.options.instantClientPath, + configDir: this.options.walletPath, + walletPath: this.options.walletPath, }); console.log("Oracle Thick client initialized"); } @@ -64,6 +67,8 @@ class TxEventQAdapter { connectString: this.options.connectString, user: this.options.user, password: this.options.password, + configDir: this.options.walletPath, + walletPath: this.options.walletPath, }); this.isRunning = true; console.log("TxEventQ adapter connected successfully"); @@ -77,6 +82,8 @@ class TxEventQAdapter { this.isRunning = false; if (this.connection) { try { + // Clear the queue cache + this.queueCache.clear(); await this.connection.close(); console.log("TxEventQ connection closed"); } @@ -87,13 +94,27 @@ class TxEventQAdapter { this.queue = null; } } + async getOrCreateQueue(queueName, options) { + if (!this.connection) { + throw new Error("TxEventQAdapter not connected"); + } + // Check if queue is already cached + if (this.queueCache.has(queueName)) { + 
return this.queueCache.get(queueName); + } + // Create new queue and cache it + const queue = await this.connection.getQueue(queueName, options); + this.queueCache.set(queueName, queue); + console.log(`Queue ${queueName} cached`); + return queue; + } async publish(type, payload) { if (!this.connection) { throw new Error("TxEventQAdapter not connected"); } try { - const queueName = `TXEVENTQ_USER.${type}`; - this.queue = await this.connection.getQueue(queueName, { + const queueName = type; + this.queue = await this.getOrCreateQueue(queueName, { payloadType: oracledb.DB_TYPE_JSON, }); const message = { @@ -121,7 +142,7 @@ class TxEventQAdapter { } this.isRunning = true; const queueName = `TXEVENTQ_USER.${type}`; - this.queue = await this.connection.getQueue(queueName, { + this.queue = await this.getOrCreateQueue(queueName, { payloadType: oracledb.DB_TYPE_JSON, }); this.queue.deqOptions.wait = 5000; diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index 22fd109..b393ff6 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -5,6 +5,7 @@ import { EventAdapter } from "../types/types"; export class TxEventQAdapter implements EventAdapter { private connection: oracledb.Connection | null = null; private queue: oracledb.AdvancedQueue | null = null; + private queueCache: Map> = new Map(); private messageHandler?: (type: string, payload: object) => void; private isRunning: boolean = false; @@ -14,6 +15,7 @@ export class TxEventQAdapter implements EventAdapter { user: string; password: string; instantClientPath?: string; + walletPath?: string; consumerName?: string; batchSize?: number; waitTime?: number; @@ -27,6 +29,8 @@ export class TxEventQAdapter implements EventAdapter { try { oracledb.initOracleClient({ libDir: this.options.instantClientPath, + configDir: this.options.walletPath, + walletPath: this.options.walletPath, }); console.log("Oracle Thick client initialized"); } catch (initError: any) { @@ -41,6 +45,8 @@ export class TxEventQAdapter implements EventAdapter { connectString: this.options.connectString, user: this.options.user, password: this.options.password, + configDir: this.options.walletPath, + walletPath: this.options.walletPath, }); this.isRunning = true; @@ -57,6 +63,8 @@ export class TxEventQAdapter implements EventAdapter { if (this.connection) { try { + this.queueCache.clear(); + await this.connection.close(); console.log("TxEventQ connection closed"); } catch (error) { @@ -67,15 +75,35 @@ export class TxEventQAdapter implements EventAdapter { } } + private async getOrCreateQueue( + queueName: string, + options: any + ): Promise> { + if (!this.connection) { + throw new Error("TxEventQAdapter not connected"); + } + + if (this.queueCache.has(queueName)) { + return this.queueCache.get(queueName)!; + } + + const queue = await this.connection.getQueue(queueName, options); + this.queueCache.set(queueName, queue); + + console.log(`Queue ${queueName} cached`); + + return queue; + } + async publish(type: string, payload: T): Promise { if (!this.connection) { throw new Error("TxEventQAdapter not connected"); } try { - const queueName = `TXEVENTQ_USER.${type}`; + const queueName = type; - this.queue = await this.connection.getQueue(queueName, { + this.queue = await this.getOrCreateQueue(queueName, { payloadType: oracledb.DB_TYPE_JSON, } as any); @@ -108,7 +136,7 @@ export class TxEventQAdapter implements EventAdapter { const queueName = `TXEVENTQ_USER.${type}`; - this.queue = await this.connection.getQueue(queueName, 
{ + this.queue = await this.getOrCreateQueue(queueName, { payloadType: oracledb.DB_TYPE_JSON, }); @@ -170,5 +198,4 @@ export class TxEventQAdapter implements EventAdapter { // TODO: Implement backlog calculation for TxEventQ return backlogMap; } -} - +} \ No newline at end of file diff --git a/client/eventManager.js b/client/eventManager.js index 7e9d0d0..ae6731a 100644 --- a/client/eventManager.js +++ b/client/eventManager.js @@ -5,7 +5,7 @@ const metrics_1 = require("./metrics"); const KafkaAdapter_1 = require("./adapters/KafkaAdapter"); const SocketAdapter_1 = require("./adapters/SocketAdapter"); const TxEventQAdapter_1 = require("./adapters/TxEventQAdapter"); -const KAFKA_TOPICS = [ +const TOPICS = [ "KNOWLEDGE_CREATED", "MESSAGE_USER_MESSAGED", "SESSION_USER_MESSAGED", @@ -14,6 +14,8 @@ const KAFKA_TOPICS = [ "STEP_COMPLETED", "MESSAGE_USER_MESSAGED", "MESSAGE_ASSISTANT_MESSAGED", + "RESPONSIBILITY_CREATED", + "RESPONSIBILITY_DESCRIPTION_GENERATED", "SESSION_INITIATED", "SESSION_USER_MESSAGED", "SESSION_AI_MESSAGED", @@ -45,7 +47,7 @@ class EventManager { clientId: options.clientId, brokers: options.brokers, groupId: options.groupId, - topics: KAFKA_TOPICS, + topics: TOPICS, }); this.startBacklogMonitoring(); break; @@ -55,6 +57,7 @@ class EventManager { user: options.user, password: options.password, instantClientPath: options.instantClientPath, + walletPath: options.walletPath, consumerName: options.consumerName, batchSize: options.batchSize, waitTime: options.waitTime, @@ -166,7 +169,7 @@ class EventManager { if (!(this.adapter instanceof KafkaAdapter_1.KafkaAdapter)) return; try { - const backlog = await this.adapter.getBacklog(KAFKA_TOPICS); + const backlog = await this.adapter.getBacklog(TOPICS); backlog.forEach((size, topic) => { this.metrics.updateKafkaBacklog(topic, size); console.log(`Backlog for topic ${topic}: ${size} messages`); diff --git a/client/eventManager.ts b/client/eventManager.ts index deb724a..8508ce6 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -5,7 +5,7 @@ import { KafkaAdapter } from "./adapters/KafkaAdapter"; import { SocketAdapter } from "./adapters/SocketAdapter"; import { TxEventQAdapter } from "./adapters/TxEventQAdapter"; -const KAFKA_TOPICS = [ +const TOPICS = [ "KNOWLEDGE_CREATED", "MESSAGE_USER_MESSAGED", "SESSION_USER_MESSAGED", @@ -14,6 +14,8 @@ const KAFKA_TOPICS = [ "STEP_COMPLETED", "MESSAGE_USER_MESSAGED", "MESSAGE_ASSISTANT_MESSAGED", + "RESPONSIBILITY_CREATED", + "RESPONSIBILITY_DESCRIPTION_GENERATED", "SESSION_INITIATED", "SESSION_USER_MESSAGED", "SESSION_AI_MESSAGED", @@ -45,7 +47,7 @@ export class EventManager { clientId: options.clientId, brokers: options.brokers, groupId: options.groupId, - topics: KAFKA_TOPICS, + topics: TOPICS, }); this.startBacklogMonitoring(); break; @@ -56,6 +58,7 @@ export class EventManager { user: options.user, password: options.password, instantClientPath: options.instantClientPath, + walletPath: options.walletPath, consumerName: options.consumerName, batchSize: options.batchSize, waitTime: options.waitTime, @@ -190,7 +193,7 @@ export class EventManager { if (!(this.adapter instanceof KafkaAdapter)) return; try { - const backlog = await this.adapter.getBacklog(KAFKA_TOPICS); + const backlog = await this.adapter.getBacklog(TOPICS); backlog.forEach((size, topic) => { this.metrics.updateKafkaBacklog(topic, size); console.log(`Backlog for topic ${topic}: ${size} messages`); @@ -220,3 +223,4 @@ export class EventManager { return this.metrics.getPushgatewayConfig(); } } + diff --git 
a/client/types/types.d.ts b/client/types/types.d.ts index f526111..89cfef2 100644 --- a/client/types/types.d.ts +++ b/client/types/types.d.ts @@ -28,8 +28,10 @@ export interface TxEventQOptions extends BaseInitOptions { user: string; password: string; instantClientPath?: string; + walletPath?: string; consumerName?: string; batchSize?: number; waitTime?: number; + topics?: string[]; } export type InitOptions = InMemoryOptions | KafkaOptions | TxEventQOptions; diff --git a/client/types/types.ts b/client/types/types.ts index 366abb1..5500182 100644 --- a/client/types/types.ts +++ b/client/types/types.ts @@ -33,9 +33,11 @@ export interface TxEventQOptions extends BaseInitOptions { user: string; password: string; instantClientPath?: string; + walletPath?: string; consumerName?: string; batchSize?: number; waitTime?: number; + topics?: string[]; } export type InitOptions = InMemoryOptions | KafkaOptions | TxEventQOptions; diff --git a/package-lock.json b/package-lock.json index c103d67..9e3fd75 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,15 +1,16 @@ { "name": "node-event-test-package", - "version": "1.1.31", + "version": "1.1.39", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "node-event-test-package", - "version": "1.1.31", + "version": "1.1.39", "dependencies": { "chalk": "^4.1.2", "kafkajs": "^2.2.4", + "node-event-test-package": "^1.1.45", "oracledb": "^6.9.0", "prom-client": "^15.1.3", "socket.io": "^4.8.1", @@ -4398,6 +4399,23 @@ "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", "dev": true }, + "node_modules/node-event-test-package": { + "version": "1.1.45", + "resolved": "https://registry.npmjs.org/node-event-test-package/-/node-event-test-package-1.1.45.tgz", + "integrity": "sha512-RzPYZZ4mUV6DF/ahkztFgvyFyLxop3fMpz4C/dJQOWr//RLlMWKoG1FFg6q32rXMaCH/jF768ak5TnDWQv0g+w==", + "dependencies": { + "chalk": "^4.1.2", + "kafkajs": "^2.2.4", + "oracledb": "^6.9.0", + "prom-client": "^15.1.3", + "socket.io": "^4.8.1", + "socket.io-client": "^4.8.1", + "uuid": "^9.0.0" + }, + "bin": { + "server": "server/server.js" + } + }, "node_modules/node-int64": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", diff --git a/package.json b/package.json index bb85511..51c7207 100644 --- a/package.json +++ b/package.json @@ -17,6 +17,7 @@ "dependencies": { "chalk": "^4.1.2", "kafkajs": "^2.2.4", + "node-event-test-package": "^1.1.45", "oracledb": "^6.9.0", "prom-client": "^15.1.3", "socket.io": "^4.8.1", From 587354f6bf9b1f7039e23bc6da10c28c5eafc9cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?= Date: Mon, 20 Oct 2025 12:56:03 +0300 Subject: [PATCH 30/35] Add event history tracking and unify backlog metrics Introduces getHistory to TxEventQAdapter for retrieving recent event history per topic. EventManager now supports backlog/history monitoring for both Kafka and TxEventQ adapters, updating a unified backlog metric. Metrics are refactored to use a generic eventBacklog gauge instead of Kafka-specific naming, and Pushgateway integration is improved for periodic metric pushes. 
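A usage sketch of the new history API (the getHistory signature and option names come from the diff below; the connection options repeat the earlier README example, and the topic choice is illustrative):

    import { TxEventQAdapter } from "./client/adapters/TxEventQAdapter";

    const adapter = new TxEventQAdapter({
      connectString: "localhost:1522/FREEPDB1",
      user: "txeventq_user",
      password: "pass123",
    });

    async function logRecentHistory(): Promise<void> {
      await adapter.connect();
      // Entries from the last hour, newest first, at most 100 per topic.
      const history = await adapter.getHistory(["SESSION_INITIATED"], {
        since: new Date(Date.now() - 60 * 60 * 1000),
        limitPerTopic: 100,
        newestFirst: true,
      });
      for (const [topic, entries] of history) {
        console.log(`${topic}: ${entries.length} message(s) since cutoff`);
      }
    }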
--- client/adapters/TxEventQAdapter.ts | 149 +++++++++++++++++++++++++---- client/eventManager.ts | 100 +++++++++++++------ client/metrics.ts | 29 +++--- 3 files changed, 216 insertions(+), 62 deletions(-) diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index b393ff6..eb2f563 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -20,12 +20,13 @@ export class TxEventQAdapter implements EventAdapter { batchSize?: number; waitTime?: number; autoCommit?: boolean; + queueOwner?: string; } ) {} async connect(): Promise { try { - if (this.options.instantClientPath && oracledb.thin) { + if (this.options.instantClientPath && (oracledb as any).thin) { try { oracledb.initOracleClient({ libDir: this.options.instantClientPath, @@ -64,7 +65,7 @@ export class TxEventQAdapter implements EventAdapter { if (this.connection) { try { this.queueCache.clear(); - + await this.connection.close(); console.log("TxEventQ connection closed"); } catch (error) { @@ -89,9 +90,9 @@ export class TxEventQAdapter implements EventAdapter { const queue = await this.connection.getQueue(queueName, options); this.queueCache.set(queueName, queue); - + console.log(`Queue ${queueName} cached`); - + return queue; } @@ -104,7 +105,7 @@ export class TxEventQAdapter implements EventAdapter { const queueName = type; this.queue = await this.getOrCreateQueue(queueName, { - payloadType: oracledb.DB_TYPE_JSON, + payloadType: (oracledb as any).DB_TYPE_JSON, } as any); const message = { @@ -135,18 +136,18 @@ export class TxEventQAdapter implements EventAdapter { this.isRunning = true; const queueName = `TXEVENTQ_USER.${type}`; - + this.queue = await this.getOrCreateQueue(queueName, { - payloadType: oracledb.DB_TYPE_JSON, + payloadType: (oracledb as any).DB_TYPE_JSON, }); - + this.queue.deqOptions.wait = 5000; this.queue.deqOptions.consumerName = this.options.consumerName || `${type.toLowerCase()}_subscriber`; try { while (this.isRunning) { let messages: oracledb.AdvancedQueueMessage[] = []; - + const message = await this.queue.deqOne(); if (message) { messages = [message]; @@ -154,7 +155,7 @@ export class TxEventQAdapter implements EventAdapter { if (messages && messages.length > 0) { if (this.messageHandler) { try { - const payload = message.payload.payload || {}; + const payload = (message as any).payload?.payload || {}; this.messageHandler(type, payload); } catch (error) { console.error( @@ -163,7 +164,7 @@ export class TxEventQAdapter implements EventAdapter { ); } } - if (this.options.autoCommit) { + if (this.options.autoCommit && this.connection) { await this.connection.commit(); console.log( `Transaction committed for ${messages.length} message(s)` @@ -177,7 +178,7 @@ export class TxEventQAdapter implements EventAdapter { } } - async unsubscribe(type: string): Promise { + async unsubscribe(_type: string): Promise { if (!this.connection) { throw new Error("Subscriber not initialized"); } @@ -189,13 +190,123 @@ export class TxEventQAdapter implements EventAdapter { this.messageHandler = handler; } - async getBacklog(topics: string[]): Promise> { - const backlogMap = new Map(); + async getHistory( + topics: string[], + options?: { since?: Date; limitPerTopic?: number; newestFirst?: boolean } + ): Promise>> { + const result = new Map< + string, + Array<{ enqueuedAt?: Date | null; state?: string }> + >(); + if (!topics.length) return result; + if (!this.connection) { + for (const t of topics) result.set(t, []); + return result; + } + + const owner = 
(this.options.queueOwner || "TXEVENTQ_USER").toUpperCase(); + const limit = options?.limitPerTopic ?? 50; + const newestFirst = options?.newestFirst !== false; + const since = options?.since; + + for (const topic of topics) { + const qname = topic.toUpperCase(); + try { + const resAny = (await this.connection.execute( + `SELECT OWNER, NAME, QUEUE_TABLE + FROM ALL_QUEUES + WHERE UPPER(OWNER) = :owner AND UPPER(NAME) = :name`, + { owner, name: qname }, + { outFormat: (oracledb as any).OUT_FORMAT_OBJECT } + )) as unknown as { + rows?: Array<{ OWNER: string; NAME: string; QUEUE_TABLE: string }>; + }; + + const row = resAny.rows?.[0]; + if (!row) { + result.set(topic, []); + continue; + } - if (topics.length === 0) { - return backlogMap; + const tab = `AQ$${String(row.QUEUE_TABLE).toUpperCase()}`; + const colsAny = (await this.connection.execute( + `SELECT COLUMN_NAME, DATA_TYPE + FROM ALL_TAB_COLUMNS + WHERE OWNER = :owner AND TABLE_NAME = :tab + AND COLUMN_NAME IN ('Q_NAME','QUEUE','QUEUE_NAME','MSG_STATE','STATE','ENQ_TIME')`, + { owner, tab }, + { outFormat: (oracledb as any).OUT_FORMAT_OBJECT } + )) as unknown as { + rows?: Array<{ COLUMN_NAME: string; DATA_TYPE: string }>; + }; + + const present = new Map(); + for (const r of colsAny.rows || []) { + present.set( + String(r.COLUMN_NAME).toUpperCase(), + String(r.DATA_TYPE).toUpperCase() + ); + } + const qNameCol = + (present.has("Q_NAME") && "Q_NAME") || + (present.has("QUEUE") && "QUEUE") || + (present.has("QUEUE_NAME") && "QUEUE_NAME"); + const stateCol = + (present.has("MSG_STATE") && "MSG_STATE") || + (present.has("STATE") && "STATE"); + const hasEnqTime = present.has("ENQ_TIME"); + if (!qNameCol) { + result.set(topic, []); + continue; + } + + const tableFqn = `${owner}.${tab}`; + const where: string[] = [`UPPER(${qNameCol}) = :qname`]; + const binds: Record = { qname }; + + if (since && hasEnqTime) { + where.push("ENQ_TIME >= :since"); + binds.since = since; + } + + const orderCol = hasEnqTime ? "ENQ_TIME" : stateCol || qNameCol; + const orderDir = newestFirst ? "DESC" : "ASC"; + const selectCols = [ + stateCol ? `${stateCol} AS STATE` : `NULL AS STATE`, + hasEnqTime ? `ENQ_TIME` : `NULL AS ENQ_TIME`, + ].join(", "); + + const sql = ` + SELECT * FROM ( + SELECT ${selectCols} + FROM ${tableFqn} + WHERE ${where.join(" AND ")} + ORDER BY ${orderCol} ${orderDir} + ) + WHERE ROWNUM <= :limit_n + `; + binds.limit_n = limit; + + const rowsAny = (await this.connection.execute(sql, binds, { + outFormat: (oracledb as any).OUT_FORMAT_OBJECT, + })) as unknown as { + rows?: Array<{ STATE?: number | string; ENQ_TIME?: Date | null }>; + }; + + const rows = rowsAny.rows || []; + result.set( + topic, + rows.map((r) => ({ + enqueuedAt: r.ENQ_TIME ?? null, + state: r.STATE != null ? 
String(r.STATE) : undefined, + })) + ); + } catch (err) { + console.error(`Error fetching history for topic ${topic}:`, err); + result.set(topic, []); + } } - // TODO: Implement backlog calculation for TxEventQ - return backlogMap; + + return result; } -} \ No newline at end of file +} diff --git a/client/eventManager.ts b/client/eventManager.ts index 8508ce6..0e4bdbe 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -25,15 +25,20 @@ const TOPICS = [ "KNOWLEDGES_LOADED", "MESSAGES_LOADED", ]; + export class EventManager { private adapter: EventAdapter | null = null; private callbacks: Map> = new Map(); private metrics = new EventMetrics(); private backlogInterval: NodeJS.Timeout | null = null; + + private historySince?: Date; + async init(options: InitOptions): Promise { if (this.adapter) { await this.disconnect(); } + switch (options.type) { case "inMemory": this.adapter = new SocketAdapter({ @@ -49,9 +54,7 @@ export class EventManager { groupId: options.groupId, topics: TOPICS, }); - this.startBacklogMonitoring(); break; - case "txeventq": this.adapter = new TxEventQAdapter({ connectString: options.connectString, @@ -64,29 +67,33 @@ export class EventManager { waitTime: options.waitTime, }); break; - default: throw new Error(`Unknown adapter type`); } + await this.adapter.connect(); this.adapter.onMessage((type, payload) => { this.handleIncomingMessage(type, payload); }); + + this.metrics.seedBacklogMetrics(TOPICS); + + this.startBacklogMonitoring(); } + async publish( ...args: [...string[], T] ): Promise { - if (args.length < 1) { + if (args.length < 1) throw new Error("publish requires at least one event type and a payload"); - } - if (!this.adapter) { - throw new Error("Event system not initialized"); - } + if (!this.adapter) throw new Error("Event system not initialized"); + const payload = args[args.length - 1] as T; const type = args.slice(0, -1) as string[]; const mergedType = type.join("_"); this.validateEventType(mergedType); + const payloadSize = JSON.stringify(payload).length; const endTimer = this.metrics.recordPublish(mergedType, payloadSize); try { @@ -99,17 +106,15 @@ export class EventManager { throw error; } } + async subscribe( type: string, callback: Callback ): Promise<() => void> { - if (!this.callbacks.has(type)) { - this.callbacks.set(type, new Set()); - } + if (!this.callbacks.has(type)) this.callbacks.set(type, new Set()); const callbackSet = this.callbacks.get(type)!; callbackSet.add(callback as Callback); - this.metrics.updateSubscriptions(type, callbackSet.size); if (this.adapter && callbackSet.size === 1) { @@ -118,26 +123,20 @@ export class EventManager { return async () => { callbackSet.delete(callback as Callback); - if (callbackSet.size === 0) { this.callbacks.delete(type); - if (this.adapter) { - await this.adapter.unsubscribe(type); - } + if (this.adapter) await this.adapter.unsubscribe(type); } - this.metrics.updateSubscriptions(type, callbackSet.size); }; } async disconnect(): Promise { this.stopBacklogMonitoring(); - if (this.adapter) { await this.adapter.disconnect(); this.adapter = null; } - this.callbacks.clear(); } @@ -147,7 +146,7 @@ export class EventManager { private executeCallbacks(type: string, payload: object): void { const callbackSet = this.callbacks.get(type); - if (!callbackSet) return; // No callbacks for this topic - message ignored + if (!callbackSet) return; callbackSet.forEach((callback) => { setTimeout(() => { @@ -173,12 +172,19 @@ export class EventManager { } private startBacklogMonitoring(intervalMs: number = 
30000): void { - if (!(this.adapter instanceof KafkaAdapter)) return; + if (!this.adapter) return; - this.updateBacklogMetrics(); + if (!this.historySince) { + this.historySince = new Date(Date.now() - 24 * 60 * 60 * 1000); + } + this.updateBacklogMetrics().catch((e) => + console.error("Initial metrics update failed:", e) + ); this.backlogInterval = setInterval(() => { - this.updateBacklogMetrics(); + this.updateBacklogMetrics().catch((e) => + console.error("Periodic metrics update failed:", e) + ); }, intervalMs); } @@ -190,16 +196,49 @@ export class EventManager { } private async updateBacklogMetrics(): Promise { - if (!(this.adapter instanceof KafkaAdapter)) return; + if (!this.adapter) return; try { - const backlog = await this.adapter.getBacklog(TOPICS); - backlog.forEach((size, topic) => { - this.metrics.updateKafkaBacklog(topic, size); - console.log(`Backlog for topic ${topic}: ${size} messages`); - }); + if (this.adapter instanceof KafkaAdapter) { + const backlog = await this.adapter.getBacklog(TOPICS); + backlog.forEach((size, topic) => { + this.metrics.updateEventBacklog(topic, size); + }); + } else if (this.adapter instanceof TxEventQAdapter) { + const since = this.historySince; + const history = (await (this.adapter as any).getHistory(TOPICS, { + since, + limitPerTopic: 100, + newestFirst: true, + })) as Map>; + + let nextSince = since ?? new Date(0); + + for (const topic of TOPICS) { + const entries = history.get(topic) ?? []; + this.metrics.updateEventBacklog(topic, entries.length); + + for (const e of entries) { + const t = + e.enqueuedAt instanceof Date + ? e.enqueuedAt + : e.enqueuedAt + ? new Date(e.enqueuedAt) + : undefined; + if (t && t > nextSince) nextSince = t; + } + } + + if (nextSince && (!since || nextSince > since)) { + this.historySince = new Date(nextSince.getTime() + 1); + } + } + + if (this.metrics.getPushgatewayConfig()) { + await this.metrics.pushMetricsToGateway(); + } } catch (error) { - console.error("Error updating backlog metrics:", error); + console.error("Error updating backlog/history metrics:", error); } } @@ -223,4 +262,3 @@ export class EventManager { return this.metrics.getPushgatewayConfig(); } } - diff --git a/client/metrics.ts b/client/metrics.ts index cd5592a..a511da7 100644 --- a/client/metrics.ts +++ b/client/metrics.ts @@ -19,7 +19,7 @@ export class EventMetrics { private readonly publishErrors: client.Counter; private readonly callbackDuration: client.Histogram; private readonly throughput: client.Counter; - private readonly kafkaBacklog: client.Gauge; + private readonly eventBacklog: client.Gauge; constructor() { this.registry = new client.Registry(); @@ -76,9 +76,9 @@ export class EventMetrics { registers: [this.registry], }); - this.kafkaBacklog = new client.Gauge({ - name: "kafka_backlog_events_total", - help: "Total number of events waiting to be processed", + this.eventBacklog = new client.Gauge({ + name: "backlog_events_total", + help: "Total events waiting to be processed (or recent history count for TxEventQ), labeled by topic", labelNames: ["topic"], registers: [this.registry], }); @@ -103,8 +103,14 @@ export class EventMetrics { this.subscriptionGauge.labels(type).set(count); } - updateKafkaBacklog(topic: string, size: number): void { - this.kafkaBacklog.labels(topic).set(size); + updateEventBacklog(topic: string, size: number): void { + this.eventBacklog.labels(topic).set(size); + } + + seedBacklogMetrics(topics: string[]): void { + for (const topic of topics) { + this.eventBacklog.labels(topic).set(0); + } } 
startPushgateway(config: PushgatewayConfig = {}): void { @@ -117,6 +123,8 @@ export class EventMetrics { this.stopPushgateway(); + this.pushMetricsToGateway().catch(() => {}); + this.pushgatewayInterval = setInterval(() => { this.pushMetricsToGateway(); }, this.pushgatewayConfig.interval); @@ -142,24 +150,21 @@ export class EventMetrics { } try { - const body = await this.registry.metrics(); + const body = await this.registry.metrics(); let url = `${this.pushgatewayConfig.url}/metrics/job/${this.pushgatewayConfig.jobName}`; - if (this.pushgatewayConfig.instance) { url += `/instance/${this.pushgatewayConfig.instance}`; } const response = await fetch(url, { method: "POST", - headers: { "Content-Type": "text/plain" }, + headers: { "Content-Type": "text/plain; version=0.0.4; charset=utf-8" }, body, }); if (!response.ok) { throw new Error(`HTTP ${response.status}: ${response.statusText}`); } - - console.log("Metrics pushed to Pushgateway successfully"); } catch (err) { console.error("Failed to push metrics to Pushgateway:", err); } @@ -168,4 +173,4 @@ export class EventMetrics { getPushgatewayConfig(): PushgatewayConfig | undefined { return this.pushgatewayConfig; } -} \ No newline at end of file +} From e87d1c35dbe156359a3797a8e73e39e91ceffd07 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Mon, 20 Oct 2025 17:22:31 +0300 Subject: [PATCH 31/35] Use then instead of await function --- client/adapters/TxEventQAdapter.ts | 32 ++++++++++++++---------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index eb2f563..af29ff9 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -101,32 +101,29 @@ export class TxEventQAdapter implements EventAdapter { throw new Error("TxEventQAdapter not connected"); } - try { - const queueName = type; + const queueName = type; - this.queue = await this.getOrCreateQueue(queueName, { - payloadType: (oracledb as any).DB_TYPE_JSON, - } as any); + this.queue = await this.getOrCreateQueue(queueName, { + payloadType: (oracledb as any).DB_TYPE_JSON, + } as any); - const message = { - topic: type, - payload: payload, - }; + const message = { + topic: type, + payload: payload, + }; - await this.queue.enqOne({ + this.queue + .enqOne({ payload: message, correlation: type, priority: 0, delay: 0, expiration: -1, exceptionQueue: "", - } as any); - - await this.connection.commit(); - } catch (error: any) { - console.error("Failed to publish event to TxEventQ:", error.message); - throw error; - } + } as any) + .then(() => { + this.connection.commit(); + }); } async subscribe(type: string): Promise { @@ -310,3 +307,4 @@ export class TxEventQAdapter implements EventAdapter { return result; } } + From fd12fb82ffeae2183402142bc5388f43f6a68fe1 Mon Sep 17 00:00:00 2001 From: Halil Ibrahim Cengel Date: Tue, 21 Oct 2025 10:24:25 +0300 Subject: [PATCH 32/35] Revert metrics changes --- client/adapters/TxEventQAdapter.ts | 152 ++++------------------------- client/eventManager.ts | 100 ++++++------------- client/metrics.ts | 29 +++--- 3 files changed, 63 insertions(+), 218 deletions(-) diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts index af29ff9..77e94da 100644 --- a/client/adapters/TxEventQAdapter.ts +++ b/client/adapters/TxEventQAdapter.ts @@ -20,13 +20,12 @@ export class TxEventQAdapter implements EventAdapter { batchSize?: number; waitTime?: number; autoCommit?: boolean; - queueOwner?: string; } ) {} async connect(): 
Promise { try { - if (this.options.instantClientPath && (oracledb as any).thin) { + if (this.options.instantClientPath && oracledb.thin) { try { oracledb.initOracleClient({ libDir: this.options.instantClientPath, @@ -65,7 +64,7 @@ export class TxEventQAdapter implements EventAdapter { if (this.connection) { try { this.queueCache.clear(); - + await this.connection.close(); console.log("TxEventQ connection closed"); } catch (error) { @@ -90,9 +89,9 @@ export class TxEventQAdapter implements EventAdapter { const queue = await this.connection.getQueue(queueName, options); this.queueCache.set(queueName, queue); - + console.log(`Queue ${queueName} cached`); - + return queue; } @@ -103,9 +102,9 @@ export class TxEventQAdapter implements EventAdapter { const queueName = type; - this.queue = await this.getOrCreateQueue(queueName, { - payloadType: (oracledb as any).DB_TYPE_JSON, - } as any); + this.queue = await this.getOrCreateQueue(queueName, { + payloadType: oracledb.DB_TYPE_JSON, + } as any); const message = { topic: type, @@ -133,18 +132,18 @@ export class TxEventQAdapter implements EventAdapter { this.isRunning = true; const queueName = `TXEVENTQ_USER.${type}`; - + this.queue = await this.getOrCreateQueue(queueName, { - payloadType: (oracledb as any).DB_TYPE_JSON, + payloadType: oracledb.DB_TYPE_JSON, }); - + this.queue.deqOptions.wait = 5000; this.queue.deqOptions.consumerName = this.options.consumerName || `${type.toLowerCase()}_subscriber`; try { while (this.isRunning) { let messages: oracledb.AdvancedQueueMessage[] = []; - + const message = await this.queue.deqOne(); if (message) { messages = [message]; @@ -152,7 +151,7 @@ export class TxEventQAdapter implements EventAdapter { if (messages && messages.length > 0) { if (this.messageHandler) { try { - const payload = (message as any).payload?.payload || {}; + const payload = message.payload.payload || {}; this.messageHandler(type, payload); } catch (error) { console.error( @@ -161,7 +160,7 @@ export class TxEventQAdapter implements EventAdapter { ); } } - if (this.options.autoCommit && this.connection) { + if (this.options.autoCommit) { await this.connection.commit(); console.log( `Transaction committed for ${messages.length} message(s)` @@ -175,7 +174,7 @@ export class TxEventQAdapter implements EventAdapter { } } - async unsubscribe(_type: string): Promise { + async unsubscribe(type: string): Promise { if (!this.connection) { throw new Error("Subscriber not initialized"); } @@ -187,124 +186,13 @@ export class TxEventQAdapter implements EventAdapter { this.messageHandler = handler; } - async getHistory( - topics: string[], - options?: { since?: Date; limitPerTopic?: number; newestFirst?: boolean } - ): Promise>> { - const result = new Map< - string, - Array<{ enqueuedAt?: Date | null; state?: string }> - >(); - if (!topics.length) return result; - if (!this.connection) { - for (const t of topics) result.set(t, []); - return result; - } - - const owner = (this.options.queueOwner || "TXEVENTQ_USER").toUpperCase(); - const limit = options?.limitPerTopic ?? 
50; - const newestFirst = options?.newestFirst !== false; - const since = options?.since; - - for (const topic of topics) { - const qname = topic.toUpperCase(); - try { - const resAny = (await this.connection.execute( - `SELECT OWNER, NAME, QUEUE_TABLE - FROM ALL_QUEUES - WHERE UPPER(OWNER) = :owner AND UPPER(NAME) = :name`, - { owner, name: qname }, - { outFormat: (oracledb as any).OUT_FORMAT_OBJECT } - )) as unknown as { - rows?: Array<{ OWNER: string; NAME: string; QUEUE_TABLE: string }>; - }; - - const row = resAny.rows?.[0]; - if (!row) { - result.set(topic, []); - continue; - } + async getBacklog(topics: string[]): Promise> { + const backlogMap = new Map(); - const tab = `AQ$${String(row.QUEUE_TABLE).toUpperCase()}`; - const colsAny = (await this.connection.execute( - `SELECT COLUMN_NAME, DATA_TYPE - FROM ALL_TAB_COLUMNS - WHERE OWNER = :owner AND TABLE_NAME = :tab - AND COLUMN_NAME IN ('Q_NAME','QUEUE','QUEUE_NAME','MSG_STATE','STATE','ENQ_TIME')`, - { owner, tab }, - { outFormat: (oracledb as any).OUT_FORMAT_OBJECT } - )) as unknown as { - rows?: Array<{ COLUMN_NAME: string; DATA_TYPE: string }>; - }; - - const present = new Map(); - for (const r of colsAny.rows || []) { - present.set( - String(r.COLUMN_NAME).toUpperCase(), - String(r.DATA_TYPE).toUpperCase() - ); - } - const qNameCol = - (present.has("Q_NAME") && "Q_NAME") || - (present.has("QUEUE") && "QUEUE") || - (present.has("QUEUE_NAME") && "QUEUE_NAME"); - const stateCol = - (present.has("MSG_STATE") && "MSG_STATE") || - (present.has("STATE") && "STATE"); - const hasEnqTime = present.has("ENQ_TIME"); - if (!qNameCol) { - result.set(topic, []); - continue; - } - - const tableFqn = `${owner}.${tab}`; - const where: string[] = [`UPPER(${qNameCol}) = :qname`]; - const binds: Record = { qname }; - - if (since && hasEnqTime) { - where.push("ENQ_TIME >= :since"); - binds.since = since; - } - - const orderCol = hasEnqTime ? "ENQ_TIME" : stateCol || qNameCol; - const orderDir = newestFirst ? "DESC" : "ASC"; - const selectCols = [ - stateCol ? `${stateCol} AS STATE` : `NULL AS STATE`, - hasEnqTime ? `ENQ_TIME` : `NULL AS ENQ_TIME`, - ].join(", "); - - const sql = ` - SELECT * FROM ( - SELECT ${selectCols} - FROM ${tableFqn} - WHERE ${where.join(" AND ")} - ORDER BY ${orderCol} ${orderDir} - ) - WHERE ROWNUM <= :limit_n - `; - binds.limit_n = limit; - - const rowsAny = (await this.connection.execute(sql, binds, { - outFormat: (oracledb as any).OUT_FORMAT_OBJECT, - })) as unknown as { - rows?: Array<{ STATE?: number | string; ENQ_TIME?: Date | null }>; - }; - - const rows = rowsAny.rows || []; - result.set( - topic, - rows.map((r) => ({ - enqueuedAt: r.ENQ_TIME ?? null, - state: r.STATE != null ? 
String(r.STATE) : undefined, - })) - ); - } catch (err) { - console.error(`Error fetching history for topic ${topic}:`, err); - result.set(topic, []); - } + if (topics.length === 0) { + return backlogMap; } - - return result; + // TODO: Implement backlog calculation for TxEventQ + return backlogMap; } } - diff --git a/client/eventManager.ts b/client/eventManager.ts index 0e4bdbe..8508ce6 100644 --- a/client/eventManager.ts +++ b/client/eventManager.ts @@ -25,20 +25,15 @@ const TOPICS = [ "KNOWLEDGES_LOADED", "MESSAGES_LOADED", ]; - export class EventManager { private adapter: EventAdapter | null = null; private callbacks: Map> = new Map(); private metrics = new EventMetrics(); private backlogInterval: NodeJS.Timeout | null = null; - - private historySince?: Date; - async init(options: InitOptions): Promise { if (this.adapter) { await this.disconnect(); } - switch (options.type) { case "inMemory": this.adapter = new SocketAdapter({ @@ -54,7 +49,9 @@ export class EventManager { groupId: options.groupId, topics: TOPICS, }); + this.startBacklogMonitoring(); break; + case "txeventq": this.adapter = new TxEventQAdapter({ connectString: options.connectString, @@ -67,33 +64,29 @@ export class EventManager { waitTime: options.waitTime, }); break; + default: throw new Error(`Unknown adapter type`); } - await this.adapter.connect(); this.adapter.onMessage((type, payload) => { this.handleIncomingMessage(type, payload); }); - - this.metrics.seedBacklogMetrics(TOPICS); - - this.startBacklogMonitoring(); } - async publish( ...args: [...string[], T] ): Promise { - if (args.length < 1) + if (args.length < 1) { throw new Error("publish requires at least one event type and a payload"); - if (!this.adapter) throw new Error("Event system not initialized"); - + } + if (!this.adapter) { + throw new Error("Event system not initialized"); + } const payload = args[args.length - 1] as T; const type = args.slice(0, -1) as string[]; const mergedType = type.join("_"); this.validateEventType(mergedType); - const payloadSize = JSON.stringify(payload).length; const endTimer = this.metrics.recordPublish(mergedType, payloadSize); try { @@ -106,15 +99,17 @@ export class EventManager { throw error; } } - async subscribe( type: string, callback: Callback ): Promise<() => void> { - if (!this.callbacks.has(type)) this.callbacks.set(type, new Set()); + if (!this.callbacks.has(type)) { + this.callbacks.set(type, new Set()); + } const callbackSet = this.callbacks.get(type)!; callbackSet.add(callback as Callback); + this.metrics.updateSubscriptions(type, callbackSet.size); if (this.adapter && callbackSet.size === 1) { @@ -123,20 +118,26 @@ export class EventManager { return async () => { callbackSet.delete(callback as Callback); + if (callbackSet.size === 0) { this.callbacks.delete(type); - if (this.adapter) await this.adapter.unsubscribe(type); + if (this.adapter) { + await this.adapter.unsubscribe(type); + } } + this.metrics.updateSubscriptions(type, callbackSet.size); }; } async disconnect(): Promise { this.stopBacklogMonitoring(); + if (this.adapter) { await this.adapter.disconnect(); this.adapter = null; } + this.callbacks.clear(); } @@ -146,7 +147,7 @@ export class EventManager { private executeCallbacks(type: string, payload: object): void { const callbackSet = this.callbacks.get(type); - if (!callbackSet) return; + if (!callbackSet) return; // No callbacks for this topic - message ignored callbackSet.forEach((callback) => { setTimeout(() => { @@ -172,19 +173,12 @@ export class EventManager { } private 
startBacklogMonitoring(intervalMs: number = 30000): void { - if (!this.adapter) return; + if (!(this.adapter instanceof KafkaAdapter)) return; - if (!this.historySince) { - this.historySince = new Date(Date.now() - 24 * 60 * 60 * 1000); - } + this.updateBacklogMetrics(); - this.updateBacklogMetrics().catch((e) => - console.error("Initial metrics update failed:", e) - ); this.backlogInterval = setInterval(() => { - this.updateBacklogMetrics().catch((e) => - console.error("Periodic metrics update failed:", e) - ); + this.updateBacklogMetrics(); }, intervalMs); } @@ -196,49 +190,16 @@ export class EventManager { } private async updateBacklogMetrics(): Promise { - if (!this.adapter) return; + if (!(this.adapter instanceof KafkaAdapter)) return; try { - if (this.adapter instanceof KafkaAdapter) { - const backlog = await this.adapter.getBacklog(TOPICS); - backlog.forEach((size, topic) => { - this.metrics.updateEventBacklog(topic, size); - }); - } else if (this.adapter instanceof TxEventQAdapter) { - const since = this.historySince; - const history = (await (this.adapter as any).getHistory(TOPICS, { - since, - limitPerTopic: 100, - newestFirst: true, - })) as Map>; - - let nextSince = since ?? new Date(0); - - for (const topic of TOPICS) { - const entries = history.get(topic) ?? []; - this.metrics.updateEventBacklog(topic, entries.length); - - for (const e of entries) { - const t = - e.enqueuedAt instanceof Date - ? e.enqueuedAt - : e.enqueuedAt - ? new Date(e.enqueuedAt) - : undefined; - if (t && t > nextSince) nextSince = t; - } - } - - if (nextSince && (!since || nextSince > since)) { - this.historySince = new Date(nextSince.getTime() + 1); - } - } - - if (this.metrics.getPushgatewayConfig()) { - await this.metrics.pushMetricsToGateway(); - } + const backlog = await this.adapter.getBacklog(TOPICS); + backlog.forEach((size, topic) => { + this.metrics.updateKafkaBacklog(topic, size); + console.log(`Backlog for topic ${topic}: ${size} messages`); + }); } catch (error) { - console.error("Error updating backlog/history metrics:", error); + console.error("Error updating backlog metrics:", error); } } @@ -262,3 +223,4 @@ export class EventManager { return this.metrics.getPushgatewayConfig(); } } + diff --git a/client/metrics.ts b/client/metrics.ts index a511da7..cd5592a 100644 --- a/client/metrics.ts +++ b/client/metrics.ts @@ -19,7 +19,7 @@ export class EventMetrics { private readonly publishErrors: client.Counter; private readonly callbackDuration: client.Histogram; private readonly throughput: client.Counter; - private readonly eventBacklog: client.Gauge; + private readonly kafkaBacklog: client.Gauge; constructor() { this.registry = new client.Registry(); @@ -76,9 +76,9 @@ export class EventMetrics { registers: [this.registry], }); - this.eventBacklog = new client.Gauge({ - name: "backlog_events_total", - help: "Total events waiting to be processed (or recent history count for TxEventQ), labeled by topic", + this.kafkaBacklog = new client.Gauge({ + name: "kafka_backlog_events_total", + help: "Total number of events waiting to be processed", labelNames: ["topic"], registers: [this.registry], }); @@ -103,14 +103,8 @@ export class EventMetrics { this.subscriptionGauge.labels(type).set(count); } - updateEventBacklog(topic: string, size: number): void { - this.eventBacklog.labels(topic).set(size); - } - - seedBacklogMetrics(topics: string[]): void { - for (const topic of topics) { - this.eventBacklog.labels(topic).set(0); - } + updateKafkaBacklog(topic: string, size: number): void { + 
     this.kafkaBacklog.labels(topic).set(size);
   }
 
   startPushgateway(config: PushgatewayConfig = {}): void {
@@ -123,8 +117,6 @@ export class EventMetrics {
 
     this.stopPushgateway();
 
-    this.pushMetricsToGateway().catch(() => {});
-
     this.pushgatewayInterval = setInterval(() => {
       this.pushMetricsToGateway();
     }, this.pushgatewayConfig.interval);
@@ -150,21 +142,24 @@ export class EventMetrics {
     }
 
     try {
-      const body = await this.registry.metrics();
+      const body = await this.registry.metrics(); 
       let url = `${this.pushgatewayConfig.url}/metrics/job/${this.pushgatewayConfig.jobName}`;
+
       if (this.pushgatewayConfig.instance) {
         url += `/instance/${this.pushgatewayConfig.instance}`;
       }
 
       const response = await fetch(url, {
         method: "POST",
-        headers: { "Content-Type": "text/plain; version=0.0.4; charset=utf-8" },
+        headers: { "Content-Type": "text/plain" },
         body,
       });
 
       if (!response.ok) {
         throw new Error(`HTTP ${response.status}: ${response.statusText}`);
       }
+
+      console.log("Metrics pushed to Pushgateway successfully");
     } catch (err) {
       console.error("Failed to push metrics to Pushgateway:", err);
     }
@@ -173,4 +168,4 @@ export class EventMetrics {
   getPushgatewayConfig(): PushgatewayConfig | undefined {
     return this.pushgatewayConfig;
   }
-}
+}
\ No newline at end of file

From e3805f389ac7a9bbe72e0e61f261e746ad3babbf Mon Sep 17 00:00:00 2001
From: Halil Ibrahim Cengel
Date: Tue, 21 Oct 2025 11:54:34 +0300
Subject: [PATCH 33/35] Remove await from publish in the Kafka and TxEventQ
 adapters

Publishing is now fire-and-forget: the Kafka producer send and the
TxEventQ enqueue/commit are chained with then/catch instead of being
awaited, so publish() returns without waiting for broker
acknowledgement. Failures are logged in the catch handlers rather than
re-rejected, which would otherwise leave an unhandled rejection on the
detached promise chain.
---
 client/adapters/KafkaAdapter.js    |  6 +++++-
 client/adapters/KafkaAdapter.ts    |  6 +++++-
 client/adapters/TxEventQAdapter.js | 47 +++++++++++++-----------------
 package.json                       |  2 +-
 4 files changed, 32 insertions(+), 29 deletions(-)

diff --git a/client/adapters/KafkaAdapter.js b/client/adapters/KafkaAdapter.js
index eb4a7d4..ccf8077 100644
--- a/client/adapters/KafkaAdapter.js
+++ b/client/adapters/KafkaAdapter.js
@@ -58,9 +58,13 @@ class KafkaAdapter {
         if (!this.producer) {
             throw new Error("Producer not connected");
         }
-        await this.producer.send({
+        this.producer.send({
             topic: type,
             messages: [{ value: JSON.stringify(payload) }],
+        }).then(() => {
+            console.log(`Message published to topic ${type}`);
+        }).catch((error) => {
+            console.error(`Error publishing message to topic ${type}:`, error);
         });
     }
     async subscribe(type) {
diff --git a/client/adapters/KafkaAdapter.ts b/client/adapters/KafkaAdapter.ts
index 868b8c8..a92a224 100644
--- a/client/adapters/KafkaAdapter.ts
+++ b/client/adapters/KafkaAdapter.ts
@@ -71,9 +71,13 @@ export class KafkaAdapter implements EventAdapter {
     if (!this.producer) {
       throw new Error("Producer not connected");
     }
-    await this.producer.send({
+    this.producer.send({
       topic: type,
       messages: [{ value: JSON.stringify(payload) }],
+    }).then(() => {
+      console.log(`Message published to topic ${type}`);
+    }).catch((error) => {
+      console.error(`Error publishing message to topic ${type}:`, error);
     });
   }
 
diff --git a/client/adapters/TxEventQAdapter.js b/client/adapters/TxEventQAdapter.js
index 31f665f..d15dbb5 100644
--- a/client/adapters/TxEventQAdapter.js
+++ b/client/adapters/TxEventQAdapter.js
@@ -82,7 +82,6 @@ class TxEventQAdapter {
         this.isRunning = false;
         if (this.connection) {
             try {
-                // Clear the queue cache
                 this.queueCache.clear();
                 await this.connection.close();
                 console.log("TxEventQ connection closed");
@@ -98,11 +97,9 @@ class TxEventQAdapter {
         if (!this.connection) {
             throw new Error("TxEventQAdapter not connected");
         }
-        // Check if queue is already cached
         if (this.queueCache.has(queueName)) {
             return this.queueCache.get(queueName);
         }
-        // Create new queue and cache it
         const queue = await this.connection.getQueue(queueName, options);
         this.queueCache.set(queueName, queue);
         console.log(`Queue ${queueName} cached`);
@@ -112,29 +109,27 @@ class TxEventQAdapter {
         if (!this.connection) {
             throw new Error("TxEventQAdapter not connected");
         }
-        try {
-            const queueName = type;
-            this.queue = await this.getOrCreateQueue(queueName, {
-                payloadType: oracledb.DB_TYPE_JSON,
-            });
-            const message = {
-                topic: type,
-                payload: payload,
-            };
-            await this.queue.enqOne({
-                payload: message,
-                correlation: type,
-                priority: 0,
-                delay: 0,
-                expiration: -1,
-                exceptionQueue: "",
-            });
-            await this.connection.commit();
-        }
-        catch (error) {
-            console.error("Failed to publish event to TxEventQ:", error.message);
-            throw error;
-        }
+        const queueName = type;
+        this.queue = await this.getOrCreateQueue(queueName, {
+            payloadType: oracledb.DB_TYPE_JSON,
+        });
+        const message = {
+            topic: type,
+            payload: payload,
+        };
+        this.queue
+            .enqOne({
+                payload: message,
+                correlation: type,
+                priority: 0,
+                delay: 0,
+                expiration: -1,
+                exceptionQueue: "",
+            })
+            .then(() => this.connection.commit())
+            .catch((error) => {
+                console.error("Failed to publish event to TxEventQ:", error.message);
+            });
     }
     async subscribe(type) {
         if (!this.connection) {
diff --git a/package.json b/package.json
index 51c7207..d996e78 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "node-event-test-package",
-  "version": "1.1.39",
+  "version": "1.1.65",
   "description": "Event-driven Message Broker",
   "main": "index.js",
   "keywords": [

From d8b8e6fc7145b34aa6bccb5916b2dd42406a81fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ebubekir=20Y=C4=B1lmaz?=
Date: Tue, 21 Oct 2025 13:46:57 +0300
Subject: [PATCH 34/35] Add backlog monitoring for TxEventQ and generalize
 metrics

Implemented backlog calculation for TxEventQAdapter and integrated it
into EventManager's backlog monitoring. Refactored metrics to
generalize backlog tracking (renamed kafkaBacklog to eventBacklog) and
updated related methods to support multiple adapters. Backlog
monitoring now supports both Kafka and TxEventQ adapters.
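
The adapter-agnostic contract this converges on is roughly the
following sketch (illustrative only: the BacklogCapable name is
invented for this note, while getBacklog, TOPICS, EventMetrics, and
updateEventBacklog are the names used in the diff below):

    // Any adapter that can report a per-topic backlog exposes this shape.
    interface BacklogCapable {
      getBacklog(topics: string[]): Promise<Map<string, number>>;
    }

    // EventManager polls it on an interval and mirrors the result into
    // the renamed backlog_events_total gauge.
    async function pollBacklog(adapter: BacklogCapable, metrics: EventMetrics) {
      const backlog = await adapter.getBacklog(TOPICS);
      backlog.forEach((size, topic) => metrics.updateEventBacklog(topic, size));
    }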
---
 client/adapters/TxEventQAdapter.ts | 58 +++++++++++++++++++++++-------
 client/eventManager.ts             | 30 ++++++++++++----
 client/metrics.ts                  | 14 ++++----
 3 files changed, 77 insertions(+), 25 deletions(-)

diff --git a/client/adapters/TxEventQAdapter.ts b/client/adapters/TxEventQAdapter.ts
index 77e94da..3591122 100644
--- a/client/adapters/TxEventQAdapter.ts
+++ b/client/adapters/TxEventQAdapter.ts
@@ -64,7 +64,7 @@ export class TxEventQAdapter implements EventAdapter {
     if (this.connection) {
       try {
         this.queueCache.clear();
-
+
         await this.connection.close();
         console.log("TxEventQ connection closed");
       } catch (error) {
@@ -89,9 +89,9 @@ export class TxEventQAdapter implements EventAdapter {
 
     const queue = await this.connection.getQueue(queueName, options);
     this.queueCache.set(queueName, queue);
-
+
     console.log(`Queue ${queueName} cached`);
-
+
     return queue;
   }
 
@@ -102,9 +102,9 @@ export class TxEventQAdapter implements EventAdapter {
 
     const queueName = type;
 
-    this.queue = await this.getOrCreateQueue(queueName, {
-      payloadType: oracledb.DB_TYPE_JSON,
-    } as any);
+    this.queue = await this.getOrCreateQueue(queueName, {
+      payloadType: oracledb.DB_TYPE_JSON,
+    } as any);
 
     const message = {
       topic: type,
@@ -132,18 +132,18 @@ export class TxEventQAdapter implements EventAdapter {
     this.isRunning = true;
 
     const queueName = `TXEVENTQ_USER.${type}`;
-
+
     this.queue = await this.getOrCreateQueue(queueName, {
       payloadType: oracledb.DB_TYPE_JSON,
     });
-
+
     this.queue.deqOptions.wait = 5000;
     this.queue.deqOptions.consumerName = this.options.consumerName || `${type.toLowerCase()}_subscriber`;
 
     try {
       while (this.isRunning) {
         let messages: oracledb.AdvancedQueueMessage<any>[] = [];
-
+
         const message = await this.queue.deqOne();
         if (message) {
           messages = [message];
@@ -188,11 +188,45 @@ export class TxEventQAdapter implements EventAdapter {
 
   async getBacklog(topics: string[]): Promise<Map<string, number>> {
     const backlogMap = new Map<string, number>();
+    if (!this.connection || !topics?.length) return backlogMap;
+
+    const sql = `
+      SELECT NVL(SUM(s.ENQUEUED_MSGS - s.DEQUEUED_MSGS), 0) AS BACKLOG
+      FROM GV$AQ_SHARDED_SUBSCRIBER_STAT s
+      JOIN USER_QUEUES q
+        ON q.QID = s.QUEUE_ID
+      JOIN USER_QUEUE_SUBSCRIBERS sub
+        ON sub.SUBSCRIBER_ID = s.SUBSCRIBER_ID
+       AND sub.QUEUE_NAME = q.NAME
+      WHERE q.NAME IN (:queueName1, :queueName2)
+        AND (:consumerName IS NULL OR sub.CONSUMER_NAME = :consumerName)
+    `;
+
+    const consumerName =
+      typeof this.options.consumerName === "string"
+        ? this.options.consumerName
+        : null;
+
+    for (const topic of topics) {
+      const queueName1 = `TXEVENTQ_USER.${topic}`;
+      const queueName2 = topic;
 
-    if (topics.length === 0) {
-      return backlogMap;
+      try {
+        const result = await this.connection.execute(
+          sql,
+          { queueName1, queueName2, consumerName },
+          { outFormat: oracledb.OUT_FORMAT_OBJECT }
+        );
+
+        const rows = (result.rows || []) as Array<{ BACKLOG: number }>;
+        const val = Number(rows?.[0]?.BACKLOG ?? 0);
+        backlogMap.set(topic, isNaN(val) ? 0 : val);
+      } catch (err) {
+        console.error(`Backlog query failed for topic ${topic}:`, err);
+        backlogMap.set(topic, 0);
+      }
     }
-    // TODO: Implement backlog calculation for TxEventQ
+
     return backlogMap;
   }
 }
diff --git a/client/eventManager.ts b/client/eventManager.ts
index 8508ce6..b783c86 100644
--- a/client/eventManager.ts
+++ b/client/eventManager.ts
@@ -30,6 +30,7 @@ export class EventManager {
   private callbacks: Map<string, Set<Callback<any>>> = new Map();
   private metrics = new EventMetrics();
   private backlogInterval: NodeJS.Timeout | null = null;
+
   async init(options: InitOptions): Promise<void> {
     if (this.adapter) {
       await this.disconnect();
@@ -63,6 +64,7 @@ export class EventManager {
           batchSize: options.batchSize,
           waitTime: options.waitTime,
         });
+        this.startBacklogMonitoring();
         break;
 
       default:
@@ -74,6 +76,7 @@ export class EventManager {
       this.handleIncomingMessage(type, payload);
     });
   }
+
   async publish<T = any>(
     ...args: [...string[], T]
   ): Promise<void> {
@@ -99,6 +102,7 @@ export class EventManager {
       throw error;
     }
   }
+
   async subscribe<T = any>(
     type: string,
     callback: Callback<T>
@@ -172,8 +176,15 @@ export class EventManager {
     }
   }
 
-  private startBacklogMonitoring(intervalMs: number = 30000): void {
-    if (!(this.adapter instanceof KafkaAdapter)) return;
+  private startBacklogMonitoring(intervalMs: number = 60000): void {
+    if (!this.adapter) return;
+
+    // Only monitor for adapters that implement meaningful backlog
+    const supportsBacklog =
+      this.adapter instanceof KafkaAdapter ||
+      this.adapter instanceof TxEventQAdapter;
+
+    if (!supportsBacklog) return;
 
     this.updateBacklogMetrics();
 
@@ -190,12 +201,20 @@ export class EventManager {
   }
 
   private async updateBacklogMetrics(): Promise<void> {
-    if (!(this.adapter instanceof KafkaAdapter)) return;
+    if (!this.adapter) return;
+
+    const supportsBacklog =
+      this.adapter instanceof KafkaAdapter ||
+      this.adapter instanceof TxEventQAdapter;
+
+    if (!supportsBacklog) return;
 
     try {
-      const backlog = await this.adapter.getBacklog(TOPICS);
+      const backlog = await (
+        this.adapter as KafkaAdapter | TxEventQAdapter
+      ).getBacklog(TOPICS);
       backlog.forEach((size, topic) => {
-        this.metrics.updateKafkaBacklog(topic, size);
+        this.metrics.updateEventBacklog(topic, size);
         console.log(`Backlog for topic ${topic}: ${size} messages`);
       });
     } catch (error) {
@@ -223,4 +242,3 @@ export class EventManager {
     return this.metrics.getPushgatewayConfig();
   }
 }
-
diff --git a/client/metrics.ts b/client/metrics.ts
index cd5592a..b8794fc 100644
--- a/client/metrics.ts
+++ b/client/metrics.ts
@@ -19,7 +19,7 @@ export class EventMetrics {
   private readonly publishErrors: client.Counter;
   private readonly callbackDuration: client.Histogram;
   private readonly throughput: client.Counter;
-  private readonly kafkaBacklog: client.Gauge;
+  private readonly eventBacklog: client.Gauge;
 
   constructor() {
     this.registry = new client.Registry();
@@ -76,8 +76,8 @@ export class EventMetrics {
       registers: [this.registry],
     });
 
-    this.kafkaBacklog = new client.Gauge({
-      name: "kafka_backlog_events_total",
+    this.eventBacklog = new client.Gauge({
+      name: "backlog_events_total",
       help: "Total number of events waiting to be processed",
       labelNames: ["topic"],
       registers: [this.registry],
@@ -103,8 +103,8 @@ export class EventMetrics {
     this.subscriptionGauge.labels(type).set(count);
   }
 
-  updateKafkaBacklog(topic: string, size: number): void {
-    this.kafkaBacklog.labels(topic).set(size);
+  updateEventBacklog(topic: string, size: number): void {
+    this.eventBacklog.labels(topic).set(size);
   }
 
   startPushgateway(config: PushgatewayConfig = {}): void {
@@ -142,7 +142,7 @@ export class EventMetrics {
     }
 
     try {
-      const body = await this.registry.metrics(); 
+      const body = await this.registry.metrics();
       let url = `${this.pushgatewayConfig.url}/metrics/job/${this.pushgatewayConfig.jobName}`;
 
       if (this.pushgatewayConfig.instance) {
@@ -168,4 +168,4 @@ export class EventMetrics {
   getPushgatewayConfig(): PushgatewayConfig | undefined {
     return this.pushgatewayConfig;
   }
-}
\ No newline at end of file
+}

From 8a63bd703025761f8d568f8699ab41b2cf55edba Mon Sep 17 00:00:00 2001
From: Halil Ibrahim Cengel
Date: Wed, 12 Nov 2025 11:01:55 +0300
Subject: [PATCH 35/35] Mirror backlog monitoring in the compiled client
 output

Port the TxEventQ backlog query, the generalized backlog monitoring in
EventManager, and the kafkaBacklog -> eventBacklog rename from the
TypeScript sources into the compiled client JS and .d.ts files, and
bump the package version.
---
 client/adapters/TxEventQAdapter.js | 30 ++++++++++++++++++++++++++++--
 client/eventManager.js             | 18 ++++++++++++++----
 client/metrics.d.ts                |  4 ++--
 client/metrics.js                  | 10 +++++-----
 package.json                       |  2 +-
 5 files changed, 50 insertions(+), 14 deletions(-)

diff --git a/client/adapters/TxEventQAdapter.js b/client/adapters/TxEventQAdapter.js
index d15dbb5..cb65ba7 100644
--- a/client/adapters/TxEventQAdapter.js
+++ b/client/adapters/TxEventQAdapter.js
@@ -183,10 +183,36 @@ class TxEventQAdapter {
     }
     async getBacklog(topics) {
         const backlogMap = new Map();
-        if (topics.length === 0) {
+        if (!this.connection || !topics?.length)
             return backlogMap;
+        const sql = `
+      SELECT NVL(SUM(s.ENQUEUED_MSGS - s.DEQUEUED_MSGS), 0) AS BACKLOG
+      FROM GV$AQ_SHARDED_SUBSCRIBER_STAT s
+      JOIN USER_QUEUES q
+        ON q.QID = s.QUEUE_ID
+      JOIN USER_QUEUE_SUBSCRIBERS sub
+        ON sub.SUBSCRIBER_ID = s.SUBSCRIBER_ID
+       AND sub.QUEUE_NAME = q.NAME
+      WHERE q.NAME IN (:queueName1, :queueName2)
+        AND (:consumerName IS NULL OR sub.CONSUMER_NAME = :consumerName)
+    `;
+        const consumerName = typeof this.options.consumerName === "string"
+            ? this.options.consumerName
+            : null;
+        for (const topic of topics) {
+            const queueName1 = `TXEVENTQ_USER.${topic}`;
+            const queueName2 = topic;
+            try {
+                const result = await this.connection.execute(sql, { queueName1, queueName2, consumerName }, { outFormat: oracledb.OUT_FORMAT_OBJECT });
+                const rows = (result.rows || []);
+                const val = Number(rows?.[0]?.BACKLOG ?? 0);
+                backlogMap.set(topic, isNaN(val) ? 0 : val);
+            }
+            catch (err) {
+                console.error(`Backlog query failed for topic ${topic}:`, err);
+                backlogMap.set(topic, 0);
+            }
         }
-        // TODO: Implement backlog calculation for TxEventQ
         return backlogMap;
     }
 }
diff --git a/client/eventManager.js b/client/eventManager.js
index ae6731a..02d74ac 100644
--- a/client/eventManager.js
+++ b/client/eventManager.js
@@ -62,6 +62,7 @@ class EventManager {
                 batchSize: options.batchSize,
                 waitTime: options.waitTime,
             });
+            this.startBacklogMonitoring();
             break;
         default:
             throw new Error(`Unknown adapter type`);
@@ -151,8 +152,13 @@ class EventManager {
             throw new Error("Invalid event type");
         }
     }
-    startBacklogMonitoring(intervalMs = 30000) {
-        if (!(this.adapter instanceof KafkaAdapter_1.KafkaAdapter))
+    startBacklogMonitoring(intervalMs = 60000) {
+        if (!this.adapter)
+            return;
+        // Only monitor for adapters that implement meaningful backlog
+        const supportsBacklog = this.adapter instanceof KafkaAdapter_1.KafkaAdapter ||
+            this.adapter instanceof TxEventQAdapter_1.TxEventQAdapter;
+        if (!supportsBacklog)
             return;
         this.updateBacklogMetrics();
         this.backlogInterval = setInterval(() => {
@@ -166,12 +172,16 @@ class EventManager {
         }
     }
     async updateBacklogMetrics() {
-        if (!(this.adapter instanceof KafkaAdapter_1.KafkaAdapter))
+        if (!this.adapter)
+            return;
+        const supportsBacklog = this.adapter instanceof KafkaAdapter_1.KafkaAdapter ||
+            this.adapter instanceof TxEventQAdapter_1.TxEventQAdapter;
+        if (!supportsBacklog)
             return;
         try {
             const backlog = await this.adapter.getBacklog(TOPICS);
             backlog.forEach((size, topic) => {
-                this.metrics.updateKafkaBacklog(topic, size);
+                this.metrics.updateEventBacklog(topic, size);
                 console.log(`Backlog for topic ${topic}: ${size} messages`);
             });
         }
diff --git a/client/metrics.d.ts b/client/metrics.d.ts
index ff344f8..ee1aa33 100644
--- a/client/metrics.d.ts
+++ b/client/metrics.d.ts
@@ -15,13 +15,13 @@ export declare class EventMetrics {
     private readonly publishErrors;
     private readonly callbackDuration;
     private readonly throughput;
-    private readonly kafkaBacklog;
+    private readonly eventBacklog;
    constructor();
    recordPublish(type: string, payloadSizeBytes: number): () => void;
    recordPublishError(type: string, errorType: string): void;
    recordCallback(type: string): () => void;
    updateSubscriptions(type: string, count: number): void;
-    updateKafkaBacklog(topic: string, size: number): void;
+    updateEventBacklog(topic: string, size: number): void;
     startPushgateway(config?: PushgatewayConfig): void;
     stopPushgateway(): void;
     pushMetricsToGateway(): Promise<void>;
diff --git a/client/metrics.js b/client/metrics.js
index b0582a0..82e3887 100644
--- a/client/metrics.js
+++ b/client/metrics.js
@@ -46,7 +46,7 @@ class EventMetrics {
     publishErrors;
     callbackDuration;
     throughput;
-    kafkaBacklog;
+    eventBacklog;
     constructor() {
         this.registry = new client.Registry();
         this.publishCounter = new client.Counter({
@@ -94,8 +94,8 @@ class EventMetrics {
             labelNames: ["event_type"],
             registers: [this.registry],
         });
-        this.kafkaBacklog = new client.Gauge({
-            name: "kafka_backlog_events_total",
+        this.eventBacklog = new client.Gauge({
+            name: "backlog_events_total",
             help: "Total number of events waiting to be processed",
             labelNames: ["topic"],
             registers: [this.registry],
@@ -116,8 +116,8 @@ class EventMetrics {
     updateSubscriptions(type, count) {
         this.subscriptionGauge.labels(type).set(count);
     }
-    updateKafkaBacklog(topic, size) {
-        this.kafkaBacklog.labels(topic).set(size);
+    updateEventBacklog(topic, size) {
+        this.eventBacklog.labels(topic).set(size);
     }
     startPushgateway(config = {}) {
         this.pushgatewayConfig = {
diff --git a/package.json b/package.json
index d996e78..3c7e8b7 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "node-event-test-package",
-  "version": "1.1.65",
+  "version": "1.1.66",
   "description": "Event-driven Message Broker",
   "main": "index.js",
   "keywords": [