From e71e4a95dedfb8435c2f455ceeed7756214b474e Mon Sep 17 00:00:00 2001 From: star-boy Date: Mon, 16 Mar 2026 15:45:13 +0530 Subject: [PATCH 1/3] fix(postHog): fixed the logger in the backend and added provider in the frontend --- backend/src/utils/logger.ts | 2 +- frontend/package.json | 1 + frontend/src/app/layout.tsx | 9 +- frontend/src/app/providers.tsx | 17 +++ pnpm-lock.yaml | 241 +++++++++++++++++++++++++++++++-- 5 files changed, 254 insertions(+), 16 deletions(-) create mode 100644 frontend/src/app/providers.tsx diff --git a/backend/src/utils/logger.ts b/backend/src/utils/logger.ts index 53ea4a6..30b27d1 100644 --- a/backend/src/utils/logger.ts +++ b/backend/src/utils/logger.ts @@ -31,7 +31,7 @@ const errorRotateTransport = new DailyRotateFile({ const logtail = new Logtail(ENV.UPTIME.BETTER_STACK_TOKEN); const logger = winston.createLogger({ - level: "http", // ← key fix! + level: "http", format: combine(timestamp({ format: "YYYY-MM-DD HH:mm:ss" }), errors({ stack: true }), logFormat), transports: [ dailyRotateTransport, diff --git a/frontend/package.json b/frontend/package.json index 9de4796..37d22fb 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -36,6 +36,7 @@ "lucide": "^0.556.0", "lucide-react": "^0.553.0", "next": "16.1.1", + "posthog-js": "^1.360.0", "react": "19.2.0", "react-day-picker": "^9.13.0", "react-dom": "19.2.0", diff --git a/frontend/src/app/layout.tsx b/frontend/src/app/layout.tsx index c3822cf..6adf602 100644 --- a/frontend/src/app/layout.tsx +++ b/frontend/src/app/layout.tsx @@ -3,6 +3,7 @@ import { Poppins } from "next/font/google"; import "./globals.css"; import { ToastProvider } from "@/context/ToastContext"; import { SocketProvider } from "@/context/SocketContext"; +import { PostHogProvider } from "./providers"; const poppins = Poppins({ variable: "--font-poppins-sans", @@ -32,9 +33,11 @@ export default function RootLayout({ return ( - - {children} - + + + {children} + + ); diff --git a/frontend/src/app/providers.tsx 
b/frontend/src/app/providers.tsx new file mode 100644 index 0000000..d29afcd --- /dev/null +++ b/frontend/src/app/providers.tsx @@ -0,0 +1,17 @@ +"use client"; + +import { useEffect } from "react"; + +import posthog from "posthog-js"; +import { PostHogProvider as PHProvider } from "posthog-js/react"; + +export function PostHogProvider({ children }: { children: React.ReactNode }) { + useEffect(() => { + posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY as string, { + api_host: process.env.NEXT_PUBLIC_POSTHOG_HOST, + defaults: "2026-01-30", + }); + }, []); + + return {children}; +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3d6e847..0dd1b3c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -310,6 +310,9 @@ importers: next: specifier: 16.1.1 version: 16.1.1(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(babel-plugin-react-compiler@1.0.0)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + posthog-js: + specifier: ^1.360.0 + version: 1.360.0 react: specifier: 19.2.0 version: 19.2.0 @@ -361,7 +364,7 @@ importers: version: 9.39.2(jiti@2.6.1) eslint-config-next: specifier: 16.0.1 - version: 16.0.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + version: 16.0.1(@typescript-eslint/parser@8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) eslint-config-prettier: specifier: ^10.1.8 version: 10.1.8(eslint@9.39.2(jiti@2.6.1)) @@ -1458,14 +1461,88 @@ packages: resolution: {integrity: sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA==} engines: {node: '>=12.4.0'} + '@opentelemetry/api-logs@0.208.0': + resolution: {integrity: sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==} + engines: {node: '>=8.0.0'} + '@opentelemetry/api@1.9.0': resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} engines: {node: '>=8.0.0'} + '@opentelemetry/core@2.2.0': + resolution: {integrity: 
sha512-FuabnnUm8LflnieVxs6eP7Z383hgQU4W1e3KJS6aOG3RxWxcHyBxH8fDMHNgu/gFx/M2jvTOW/4/PHhLz6bjWw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/core@2.6.0': + resolution: {integrity: sha512-HLM1v2cbZ4TgYN6KEOj+Bbj8rAKriOdkF9Ed3tG25FoprSiQl7kYc+RRT6fUZGOvx0oMi5U67GoFdT+XUn8zEg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/exporter-logs-otlp-http@0.208.0': + resolution: {integrity: sha512-jOv40Bs9jy9bZVLo/i8FwUiuCvbjWDI+ZW13wimJm4LjnlwJxGgB+N/VWOZUTpM+ah/awXeQqKdNlpLf2EjvYg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-exporter-base@0.208.0': + resolution: {integrity: sha512-gMd39gIfVb2OgxldxUtOwGJYSH8P1kVFFlJLuut32L6KgUC4gl1dMhn+YC2mGn0bDOiQYSk/uHOdSjuKp58vvA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-transformer@0.208.0': + resolution: {integrity: sha512-DCFPY8C6lAQHUNkzcNT9R+qYExvsk6C5Bto2pbNxgicpcSWbe2WHShLxkOxIdNcBiYPdVHv/e7vH7K6TI+C+fQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/resources@2.2.0': + resolution: {integrity: sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/resources@2.6.0': + resolution: {integrity: sha512-D4y/+OGe3JSuYUCBxtH5T9DSAWNcvCb/nQWIga8HNtXTVPQn59j0nTBAgaAXxUVBDl40mG3Tc76b46wPlZaiJQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-logs@0.208.0': + resolution: {integrity: sha512-QlAyL1jRpOeaqx7/leG1vJMp84g0xKP6gJmfELBpnI4O/9xPX+Hu5m1POk9Kl+veNkyth5t19hRlN6tNY1sjbA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + 
'@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.2.0': + resolution: {integrity: sha512-G5KYP6+VJMZzpGipQw7Giif48h6SGQ2PFKEYCybeXJsOCB4fp8azqMAAzE5lnnHK3ZVwYQrgmFbsUJO/zOnwGw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@2.2.0': + resolution: {integrity: sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/semantic-conventions@1.40.0': + resolution: {integrity: sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw==} + engines: {node: '>=14'} + '@pkgjs/parseargs@0.11.0': resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} + '@posthog/core@1.23.2': + resolution: {integrity: sha512-zTDdda9NuSHrnwSOfFMxX/pyXiycF4jtU1kTr8DL61dHhV+7LF6XF1ndRZZTuaGGbfbb/GJYkEsjEX9SXfNZeQ==} + + '@posthog/types@1.360.0': + resolution: {integrity: sha512-roypbiJ49V3jWlV/lzhXGf0cKLLRj69L4H4ZHW6YsITHlnjQ12cgdPhPS88Bb9nW9xZTVSGWWDjfNGsdgAxsNg==} + '@protobufjs/aspromise@1.1.2': resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} @@ -2644,6 +2721,9 @@ packages: '@types/triple-beam@1.3.5': resolution: {integrity: sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==} + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/ua-parser-js@0.7.39': resolution: {integrity: sha512-P/oDfpofrdtF5xw433SPALpdSchtJmY7nsJItf8h3KXqOslkbySh8zq4dSWXH2oTjRvJ5PczVEoCZPow6GicLg==} @@ -3189,6 +3269,9 @@ packages: resolution: {integrity: 
sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==} engines: {node: '>= 0.6'} + core-js@3.48.0: + resolution: {integrity: sha512-zpEHTy1fjTMZCKLHUZoVeylt9XrzaIN2rbPXEt0k+q7JE5CkCZdo6bNq55bn24a69CH7ErAVLKijxJja4fw+UQ==} + cors@2.8.5: resolution: {integrity: sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==} engines: {node: '>= 0.10'} @@ -3358,6 +3441,10 @@ packages: resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==} engines: {node: '>=0.10.0'} + dompurify@3.3.2: + resolution: {integrity: sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==} + engines: {node: '>=20'} + dot-prop@5.3.0: resolution: {integrity: sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==} engines: {node: '>=8'} @@ -3671,6 +3758,9 @@ packages: resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} engines: {node: ^12.20 || >= 14.13} + fflate@0.4.8: + resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==} + file-entry-cache@8.0.0: resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} engines: {node: '>=16.0.0'} @@ -4881,6 +4971,12 @@ packages: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} + posthog-js@1.360.0: + resolution: {integrity: sha512-jkyO+T97yi6RuiexOaXC7AnEGiC+yIfGU5DIUzI5rqBH6MltmtJw/ve2Oxc4jeua2WDr5sXMzo+SS+acbpueAA==} + + preact@10.28.4: + resolution: {integrity: sha512-uKFfOHWuSNpRFVTnljsCluEFq57OKT+0QdOiQo8XWnQ/pSvg7OpX5eNOejELXJMWy+BwM2nobz0FkvzmnpCNsQ==} + prelude-ls@1.2.1: resolution: {integrity: 
sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} @@ -4926,6 +5022,9 @@ packages: resolution: {integrity: sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==} engines: {node: '>=0.6'} + query-selector-shadow-dom@1.0.1: + resolution: {integrity: sha512-lT5yCqEBgfoMYpf3F2xQRK7zEr1rhIIZuceDK6+xRkJQ4NMbHTwXqk4NkwDwQMNqXgG9r9fyHnzwNVs6zV5KRw==} + querystringify@2.2.0: resolution: {integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} @@ -5611,6 +5710,9 @@ packages: web-vitals@4.2.4: resolution: {integrity: sha512-r4DIlprAGwJ7YM11VZp4R884m0Vmgr6EAKe3P+kO0PPj3Unqyvv59rczf6UiGcb9Z8QxZVcqKNwv/g0WNdWwsw==} + web-vitals@5.1.0: + resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} + webidl-conversions@3.0.1: resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} @@ -7634,12 +7736,91 @@ snapshots: '@nolyfill/is-core-module@1.0.39': {} - '@opentelemetry/api@1.9.0': - optional: true + '@opentelemetry/api-logs@0.208.0': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/api@1.9.0': {} + + '@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/exporter-logs-otlp-http@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + 
'@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-exporter-base@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.208.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-transformer@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + protobufjs: 7.5.4 + + '@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/sdk-logs@0.208.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + 
'@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/semantic-conventions@1.40.0': {} '@pkgjs/parseargs@0.11.0': optional: true + '@posthog/core@1.23.2': + dependencies: + cross-spawn: 7.0.6 + + '@posthog/types@1.360.0': {} + '@protobufjs/aspromise@1.1.2': {} '@protobufjs/base64@1.1.2': {} @@ -9030,6 +9211,9 @@ snapshots: '@types/triple-beam@1.3.5': {} + '@types/trusted-types@2.0.7': + optional: true + '@types/ua-parser-js@0.7.39': {} '@types/use-sync-external-store@0.0.6': {} @@ -9590,6 +9774,8 @@ snapshots: cookie@0.7.2: {} + core-js@3.48.0: {} + cors@2.8.5: dependencies: object-assign: 4.1.1 @@ -9733,6 +9919,10 @@ snapshots: dependencies: esutils: 2.0.3 + dompurify@3.3.2: + optionalDependencies: + '@types/trusted-types': 2.0.7 + dot-prop@5.3.0: dependencies: is-obj: 2.0.0 @@ -9930,13 +10120,13 @@ snapshots: escape-string-regexp@4.0.0: {} - eslint-config-next@16.0.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3): + eslint-config-next@16.0.1(@typescript-eslint/parser@8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3): dependencies: '@next/eslint-plugin-next': 16.0.1 eslint: 9.39.2(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)) - eslint-plugin-import: 2.32.0(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.39.2(jiti@2.6.1)) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.39.2(jiti@2.6.1)) eslint-plugin-jsx-a11y: 6.10.2(eslint@9.39.2(jiti@2.6.1)) eslint-plugin-react: 7.37.5(eslint@9.39.2(jiti@2.6.1)) eslint-plugin-react-hooks: 7.0.1(eslint@9.39.2(jiti@2.6.1)) @@ -9962,7 +10152,7 @@ snapshots: 
transitivePeerDependencies: - supports-color - eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)): + eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0)(eslint@9.39.2(jiti@2.6.1)): dependencies: '@nolyfill/is-core-module': 1.0.39 debug: 4.4.3(supports-color@5.5.0) @@ -9973,21 +10163,22 @@ snapshots: tinyglobby: 0.2.15 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import: 2.32.0(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.39.2(jiti@2.6.1)) transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.1(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)): + eslint-module-utils@2.12.1(@typescript-eslint/parser@8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.39.2(jiti@2.6.1)): dependencies: debug: 3.2.7 optionalDependencies: + '@typescript-eslint/parser': 8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) eslint: 9.39.2(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.39.2(jiti@2.6.1)) transitivePeerDependencies: - supports-color - eslint-plugin-import@2.32.0(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)): + 
eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.39.2(jiti@2.6.1)): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.9 @@ -9998,7 +10189,7 @@ snapshots: doctrine: 2.1.0 eslint: 9.39.2(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.1(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)))(eslint@9.39.2(jiti@2.6.1)) + eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.39.2(jiti@2.6.1)) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -10009,6 +10200,8 @@ snapshots: semver: 6.3.1 string.prototype.trimend: 1.0.9 tsconfig-paths: 3.15.0 + optionalDependencies: + '@typescript-eslint/parser': 8.51.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -10227,6 +10420,8 @@ snapshots: node-domexception: 1.0.0 web-streams-polyfill: 3.3.3 + fflate@0.4.8: {} + file-entry-cache@8.0.0: dependencies: flat-cache: 4.0.1 @@ -11478,6 +11673,24 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 + posthog-js@1.360.0: + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.208.0 + '@opentelemetry/exporter-logs-otlp-http': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.208.0(@opentelemetry/api@1.9.0) + '@posthog/core': 1.23.2 + '@posthog/types': 1.360.0 + core-js: 3.48.0 + dompurify: 3.3.2 + fflate: 0.4.8 + preact: 10.28.4 + query-selector-shadow-dom: 1.0.1 + web-vitals: 5.1.0 + + preact@10.28.4: {} + prelude-ls@1.2.1: {} prettier@3.7.4: {} @@ -11525,6 +11738,8 @@ snapshots: dependencies: side-channel: 1.1.0 + 
query-selector-shadow-dom@1.0.1: {} + querystringify@2.2.0: {} queue-microtask@1.2.3: {} @@ -12350,6 +12565,8 @@ snapshots: web-vitals@4.2.4: {} + web-vitals@5.1.0: {} + webidl-conversions@3.0.1: {} webidl-conversions@7.0.0: {} From 9fa5fe4a5bd37f8ffc63e2a044f18145bcb30939 Mon Sep 17 00:00:00 2001 From: hafzism Date: Sat, 2 May 2026 18:46:19 +0530 Subject: [PATCH 2/3] feat: implement Docker containerization for full-stack application --- .dockerignore | 32 + .github/workflows/cd.yml | 221 +++-- backend/Dockerfile | 51 ++ docker-compose.yml | 170 ++++ docs/NOTIFICATIONS_MASTERCLASS.md | 1370 +++++++++++++++++++++++++++++ docs/S3_MASTERCLASS.md | 1250 ++++++++++++++++++++++++++ frontend/Dockerfile | 74 ++ nginx/nginx.conf | 151 ++++ rabbitmq/Dockerfile | 7 + rabbitmq/enabled_plugins | 1 + 10 files changed, 3205 insertions(+), 122 deletions(-) create mode 100644 .dockerignore create mode 100644 backend/Dockerfile create mode 100644 docker-compose.yml create mode 100644 docs/NOTIFICATIONS_MASTERCLASS.md create mode 100644 docs/S3_MASTERCLASS.md create mode 100644 frontend/Dockerfile create mode 100644 nginx/nginx.conf create mode 100644 rabbitmq/Dockerfile create mode 100644 rabbitmq/enabled_plugins diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..f2ca252 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,32 @@ +# Node +node_modules +**/node_modules + +# Build outputs +**/dist +**/.next + +# Development files +**/.env +**/.env.local +**/*.pem + +# Git +.git +.gitignore +.github + +# Docs & misc +docs +images +README.md +*.log + +# Editor +.vscode +.idea + +# Test files +**/__tests__ +**/*.test.ts +**/*.spec.ts diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index 9fd6d3d..fc51af8 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -1,4 +1,4 @@ -name: Deploy to EC2 +name: Deploy to EC2 (Docker) on: push: @@ -13,60 +13,9 @@ jobs: - name: Checkout repo uses: actions/checkout@v4 - - name: Install pnpm - uses: 
pnpm/action-setup@v2 - with: - version: 9 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 20 - cache: "pnpm" - - - name: Install dependencies - run: pnpm install --frozen-lockfile - - # ---------- SCHEMAS BUILD ---------- - - name: Build schemas - working-directory: schemas - run: pnpm run build - - # ---------- BACKEND BUILD ---------- - - name: Create Service Account Key - working-directory: backend - run: echo '${{ secrets.FIREBASE_SERVICE_ACCOUNT_JSON }}' > src/serviceAccountKey.json - - - name: Build backend - working-directory: backend - run: pnpm run build - - # ---------- FRONTEND BUILD ---------- - - name: Build frontend - working-directory: frontend - run: | - # Replace placeholders in service worker - sed -i "s|NEXT_PUBLIC_FIREBASE_API_KEY_PLACEHOLDER|$(echo -n "$NEXT_PUBLIC_FIREBASE_API_KEY")|g" public/firebase-messaging-sw.js - sed -i "s|NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN_PLACEHOLDER|$(echo -n "$NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN")|g" public/firebase-messaging-sw.js - sed -i "s|NEXT_PUBLIC_FIREBASE_PROJECT_ID_PLACEHOLDER|$(echo -n "$NEXT_PUBLIC_FIREBASE_PROJECT_ID")|g" public/firebase-messaging-sw.js - sed -i "s|NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET_PLACEHOLDER|$(echo -n "$NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET")|g" public/firebase-messaging-sw.js - sed -i "s|NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID_PLACEHOLDER|$(echo -n "$NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID")|g" public/firebase-messaging-sw.js - sed -i "s|NEXT_PUBLIC_FIREBASE_APP_ID_PLACEHOLDER|$(echo -n "$NEXT_PUBLIC_FIREBASE_APP_ID")|g" public/firebase-messaging-sw.js - sed -i "s|NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID_PLACEHOLDER|$(echo -n "$NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID")|g" public/firebase-messaging-sw.js - - pnpm run build - env: - NEXT_PUBLIC_API_URL: ${{ secrets.BACKEND_URL }} - NEXT_PUBLIC_VAPID_KEY: ${{ secrets.NEXT_PUBLIC_VAPID_KEY }} - NEXT_PUBLIC_FIREBASE_API_KEY: ${{ secrets.NEXT_PUBLIC_FIREBASE_API_KEY }} - NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN: ${{ 
secrets.NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN }} - NEXT_PUBLIC_FIREBASE_PROJECT_ID: ${{ secrets.NEXT_PUBLIC_FIREBASE_PROJECT_ID }} - NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET: ${{ secrets.NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET }} - NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID: ${{ secrets.NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID }} - NEXT_PUBLIC_FIREBASE_APP_ID: ${{ secrets.NEXT_PUBLIC_FIREBASE_APP_ID }} - NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID: ${{ secrets.NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID }} - - # ---------- SSH ---------- + # -------------------------------------------------------- + # SSH Setup + # -------------------------------------------------------- - name: Setup SSH run: | mkdir -p ~/.ssh @@ -74,85 +23,113 @@ jobs: chmod 600 ~/.ssh/ec2.pem ssh-keyscan -H ${{ secrets.EC2_HOST }} >> ~/.ssh/known_hosts - # ---------- DEPLOY BACKEND ---------- - - name: Deploy Backend Artifacts + # -------------------------------------------------------- + # Sync project files to EC2 + # Excludes: node_modules, .next, dist, .git, local env files + # -------------------------------------------------------- + - name: Sync project to EC2 run: | - # Copy compiled backend code and package files - rsync -avz --delete \ - -e "ssh -i ~/.ssh/ec2.pem" \ - backend/dist backend/package.json \ - ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }}:${{ secrets.APP_DIR }}/backend/ - - # Deploy schemas (needed for workspace) rsync -avz --delete \ + --exclude='node_modules' \ + --exclude='.next' \ + --exclude='dist' \ + --exclude='.git' \ + --exclude='backend/.env' \ + --exclude='frontend/.env.local' \ + --exclude='certbot' \ -e "ssh -i ~/.ssh/ec2.pem" \ - schemas/ \ - ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }}:${{ secrets.APP_DIR }}/schemas/ - - # Copy root config - rsync -avz \ - -e "ssh -i ~/.ssh/ec2.pem" \ - pnpm-workspace.yaml package.json pnpm-lock.yaml \ + ./ \ ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }}:${{ secrets.APP_DIR }}/ - - name: Restart Backend + # 
-------------------------------------------------------- + # Inject secrets on the remote server + # -------------------------------------------------------- + - name: Write backend .env on EC2 run: | - ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'EOF' - set -e - export NVM_DIR="$HOME/.nvm" - source "$NVM_DIR/nvm.sh" - - cd ${{ secrets.APP_DIR }}/backend - - # Use CI=true to bypass the TTY/interactive prompt - # Also added --force to ensure it actually wipes the bad modules - CI=true pnpm install --prod --frozen-lockfile --ignore-scripts --force - - pm2 delete backend || true - pm2 start dist/app.js --name "backend" - pm2 delete worker || true - pm2 start dist/workers/index.js --name "worker" - pm2 save + ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'ENDSSH' + cat > ${{ secrets.APP_DIR }}/backend/.env << 'EOF' + NODE_ENV=production + PORT=5000 + FRONTEND_URL=${{ secrets.FRONTEND_URL }} + BACKEND_URL=${{ secrets.BACKEND_URL }} + GOOGLE_CLIENT_ID=${{ secrets.GOOGLE_CLIENT_ID }} + GOOGLE_CLIENT_SECRET=${{ secrets.GOOGLE_CLIENT_SECRET }} + GOOGLE_CALLBACK_URL=${{ secrets.GOOGLE_CALLBACK_URL }} + MONGODB_URI=${{ secrets.MONGODB_URI }} + ACCESS_TOKEN_SECRET=${{ secrets.ACCESS_TOKEN_SECRET }} + REFRESH_TOKEN_SECRET=${{ secrets.REFRESH_TOKEN_SECRET }} + JWT_EXPIRES_IN=7d + STRIPE_SECRET_KEY=${{ secrets.STRIPE_SECRET_KEY }} + STRIPE_PUBLISHABLE_KEY=${{ secrets.STRIPE_PUBLISHABLE_KEY }} + STRIPE_PRO_PRICE_ID=${{ secrets.STRIPE_PRO_PRICE_ID }} + STRIPE_WEBHOOK_SECRET=${{ secrets.STRIPE_WEBHOOK_SECRET }} + EMAIL_USER=${{ secrets.EMAIL_USER }} + EMAIL_PASS=${{ secrets.EMAIL_PASS }} + AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_REGION=${{ secrets.AWS_REGION }} + AWS_S3_BUCKET_NAME=${{ secrets.AWS_S3_BUCKET_NAME }} + TUMBLR_CONSUMER_KEY=${{ secrets.TUMBLR_CONSUMER_KEY }} + TUMBLR_CONSUMER_SECRET=${{ secrets.TUMBLR_CONSUMER_SECRET }} + META_APP_ID=${{ 
secrets.META_APP_ID }} + META_APP_SECRET=${{ secrets.META_APP_SECRET }} + META_REDIRECT_URI=${{ secrets.META_REDIRECT_URI }} + THREADS_APP_ID=${{ secrets.THREADS_APP_ID }} + THREADS_APP_SECRET=${{ secrets.THREADS_APP_SECRET }} + THREADS_REDIRECT_URI=${{ secrets.THREADS_REDIRECT_URI }} + MASTODON_CALLBACK_URL=${{ secrets.MASTODON_CALLBACK_URL }} + MASTODON_CLIENT_KEY=${{ secrets.MASTODON_CLIENT_KEY }} + MASTODON_CLIENT_SECRET=${{ secrets.MASTODON_CLIENT_SECRET }} + MASTODON_INSTANCE_URL=https://mastodon.social + RABBITMQ_URL=amqp://${{ secrets.RABBITMQ_USER }}:${{ secrets.RABBITMQ_PASS }}@rabbitmq:5672 + REDIS_HOST=redis + REDIS_PORT=6379 + GEMINI_API_KEY=${{ secrets.GEMINI_API_KEY }} + BETTER_STACK_TOKEN=${{ secrets.BETTER_STACK_TOKEN }} EOF + ENDSSH - # ---------- DEPLOY FRONTEND ---------- - - name: Deploy Frontend Artifacts + - name: Write Firebase service account on EC2 run: | - # 1. Create the NESTED directory structure manually - # Note the extra /frontend/ in the path ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} \ - "mkdir -p ${{ secrets.APP_DIR }}/frontend-deploy/frontend/.next/static ${{ secrets.APP_DIR }}/frontend-deploy/frontend/public" - - # 2. Copy standalone build - rsync -avz --delete \ - -e "ssh -i ~/.ssh/ec2.pem" \ - frontend/.next/standalone/ \ - ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }}:${{ secrets.APP_DIR }}/frontend-deploy/ - - # 3. Copy static files (public) INTO the nested frontend folder - rsync -avz \ - -e "ssh -i ~/.ssh/ec2.pem" \ - frontend/public/ \ - ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }}:${{ secrets.APP_DIR }}/frontend-deploy/frontend/public/ + "echo '${{ secrets.FIREBASE_SERVICE_ACCOUNT_JSON }}' > ${{ secrets.APP_DIR }}/backend/src/serviceAccountKey.json" - # 4. 
Copy static files (.next/static) INTO the nested frontend folder - rsync -avz \ - -e "ssh -i ~/.ssh/ec2.pem" \ - frontend/.next/static/ \ - ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }}:${{ secrets.APP_DIR }}/frontend-deploy/frontend/.next/static/ + - name: Write root .env (for docker-compose build args) on EC2 + run: | + ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'ENDSSH' + cat > ${{ secrets.APP_DIR }}/.env << 'EOF' + NEXT_PUBLIC_API_URL=${{ secrets.BACKEND_URL }} + NEXT_PUBLIC_VAPID_KEY=${{ secrets.NEXT_PUBLIC_VAPID_KEY }} + NEXT_PUBLIC_FIREBASE_API_KEY=${{ secrets.NEXT_PUBLIC_FIREBASE_API_KEY }} + NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN=${{ secrets.NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN }} + NEXT_PUBLIC_FIREBASE_PROJECT_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_PROJECT_ID }} + NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET=${{ secrets.NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET }} + NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID }} + NEXT_PUBLIC_FIREBASE_APP_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_APP_ID }} + NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID }} + RABBITMQ_USER=${{ secrets.RABBITMQ_USER }} + RABBITMQ_PASS=${{ secrets.RABBITMQ_PASS }} + EOF + ENDSSH - - name: Restart Frontend + # -------------------------------------------------------- + # Build & restart containers on EC2 + # -------------------------------------------------------- + - name: Deploy with Docker Compose run: | - ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'EOF' + ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'ENDSSH' set -e - export NVM_DIR="$HOME/.nvm" - source "$NVM_DIR/nvm.sh" + cd ${{ secrets.APP_DIR }} - # Go to the nested folder where server.js lives - cd ${{ secrets.APP_DIR }}/frontend-deploy/frontend - - # Clean restart to pick up new files - pm2 delete frontend || true - PORT=3000 pm2 start server.js --name "frontend" - pm2 save - EOF + # Pull 
latest images (redis, rabbitmq, nginx) + docker compose pull redis rabbitmq nginx certbot + + # Build app images (frontend + backend/worker) + docker compose build --no-cache frontend backend + + # Restart everything with zero manual intervention + docker compose up -d --remove-orphans + + # Remove dangling images to save disk space + docker image prune -f + ENDSSH diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..e873b6a --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,51 @@ +# ============================================================ +# Stage 1: Builder +# ============================================================ + +FROM node:20-alpine AS builder + +RUN corepack enable && corepack prepare pnpm@9 --activate + +WORKDIR /app + +COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ +COPY schemas/ ./schemas/ + +COPY backend/package.json ./backend/ +COPY backend/tsconfig.json ./backend/ +COPY backend/src/ ./backend/src/ + +RUN pnpm install --frozen-lockfile + +RUN cd schemas && pnpm run build + +RUN cd backend && pnpm run build + + +# ============================================================ +# Stage 2: Production Runtime +# ============================================================ +FROM node:20-alpine AS runner + +RUN corepack enable && corepack prepare pnpm@9 --activate + +WORKDIR /app + +COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ + +COPY --from=builder /app/schemas/ ./schemas/ + +COPY backend/package.json ./backend/ + +RUN pnpm install --prod --frozen-lockfile --ignore-scripts + +COPY --from=builder /app/backend/dist ./backend/dist +COPY backend/src/serviceAccountKey.json ./backend/dist/serviceAccountKey.json + +WORKDIR /app/backend + +ENV NODE_ENV=production + +EXPOSE 5000 + +CMD ["node", "dist/app.js"] diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..c52df5d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,170 @@ +name: hayon + +services: + # 
============================================================ + # Nginx + # ============================================================ + nginx: + image: nginx:1.27-alpine + container_name: hayon_nginx + ports: + - "80:80" + - "443:443" + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro + - ./certbot/conf:/etc/letsencrypt:ro + - ./certbot/www:/var/www/certbot:ro + depends_on: + - frontend + - backend + restart: unless-stopped + networks: + - hayon_net + + # ============================================================ + # Certbot + # ============================================================ + certbot: + image: certbot/certbot:latest + container_name: hayon_certbot + volumes: + - ./certbot/conf:/etc/letsencrypt + - ./certbot/www:/var/www/certbot + # Run manually: docker compose run --rm certbot certonly ... + entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'" + networks: + - hayon_net + + # ============================================================ + # Frontend + # ============================================================ + frontend: + build: + context: . 
+ dockerfile: frontend/Dockerfile + args: + NEXT_PUBLIC_API_URL: ${NEXT_PUBLIC_API_URL} + NEXT_PUBLIC_VAPID_KEY: ${NEXT_PUBLIC_VAPID_KEY} + NEXT_PUBLIC_FIREBASE_API_KEY: ${NEXT_PUBLIC_FIREBASE_API_KEY} + NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN: ${NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN} + NEXT_PUBLIC_FIREBASE_PROJECT_ID: ${NEXT_PUBLIC_FIREBASE_PROJECT_ID} + NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET: ${NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET} + NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID: ${NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID} + NEXT_PUBLIC_FIREBASE_APP_ID: ${NEXT_PUBLIC_FIREBASE_APP_ID} + NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID: ${NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID} + container_name: hayon_frontend + expose: + - "3000" + environment: + - NODE_ENV=production + - PORT=3000 + - HOSTNAME=0.0.0.0 + restart: unless-stopped + networks: + - hayon_net + + # ============================================================ + # Backend + # ============================================================ + backend: + build: + context: . + dockerfile: backend/Dockerfile + container_name: hayon_backend + expose: + - "5000" + env_file: + - backend/.env + environment: + - NODE_ENV=production + - RABBITMQ_URL=amqp://${RABBITMQ_USER:-hayon}:${RABBITMQ_PASS:-hayon_secret}@rabbitmq:5672 + - REDIS_HOST=redis + - REDIS_PORT=6379 + depends_on: + rabbitmq: + condition: service_healthy + redis: + condition: service_healthy + restart: unless-stopped + networks: + - hayon_net + + # ============================================================ + # Worker + # ============================================================ + worker: + build: + context: . 
+ dockerfile: backend/Dockerfile + container_name: hayon_worker + command: ["node", "dist/workers/index.js"] + env_file: + - backend/.env + environment: + - NODE_ENV=production + - RABBITMQ_URL=amqp://${RABBITMQ_USER:-hayon}:${RABBITMQ_PASS:-hayon_secret}@rabbitmq:5672 + - REDIS_HOST=redis + - REDIS_PORT=6379 + depends_on: + rabbitmq: + condition: service_healthy + redis: + condition: service_healthy + restart: unless-stopped + networks: + - hayon_net + + # ============================================================ + # Redis + # ============================================================ + redis: + image: redis:7-alpine + container_name: hayon_redis + expose: + - "6379" + volumes: + - redis_data:/data + command: redis-server --appendonly yes + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + networks: + - hayon_net + + # ============================================================ + # RabbitMQ + # ============================================================ + rabbitmq: + build: + context: ./rabbitmq + dockerfile: Dockerfile + container_name: hayon_rabbitmq + expose: + - "5672" + ports: + - "127.0.0.1:15672:15672" + volumes: + - rabbitmq_data:/var/lib/rabbitmq + environment: + RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER:-hayon} + RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASS:-hayon_secret} + healthcheck: + test: ["CMD", "rabbitmq-diagnostics", "ping"] + interval: 15s + timeout: 10s + retries: 10 + start_period: 30s + restart: unless-stopped + networks: + - hayon_net + +volumes: + redis_data: + rabbitmq_data: + +networks: + hayon_net: + driver: bridge diff --git a/docs/NOTIFICATIONS_MASTERCLASS.md b/docs/NOTIFICATIONS_MASTERCLASS.md new file mode 100644 index 0000000..6b394c0 --- /dev/null +++ b/docs/NOTIFICATIONS_MASTERCLASS.md @@ -0,0 +1,1370 @@ +# 🔔 NOTIFICATIONS MASTERCLASS +## The Complete Engineering Guide — Hayon Project + +--- + +## TABLE OF CONTENTS + +1. 
[The Big Picture — What Even Is a "Notification"?](#1-the-big-picture) +2. [The Notification Stack in Hayon](#2-the-notification-stack) +3. [How HTTP Falls Short — Why We Need Real-Time](#3-why-http-falls-short) +4. [WebSockets — The Protocol](#4-websockets-the-protocol) +5. [Socket.IO — WebSockets With Superpowers](#5-socketio) +6. [Firebase Cloud Messaging (FCM) — Push Notifications](#6-firebase-fcm) +7. [The Two Firebase SDKs: Admin vs Client](#7-two-firebase-sdks) +8. [Backend: The Notification Data Model](#8-notification-model) +9. [Backend: The Repository Layer](#9-notification-repository) +10. [Backend: Socket.IO Config — `config/socket.ts`](#10-socket-config) +11. [Backend: app.ts — Wiring Everything Together](#11-app-ts) +12. [Backend: Firebase Admin Setup — `config/firebase.ts`](#12-firebase-admin) +13. [Backend: The Service Layer — `notification.service.ts`](#13-notification-service) +14. [Backend: Firebase Controller — `firebase.controller.ts`](#14-firebase-controller) +15. [Backend: The Routes](#15-routes) +16. [Frontend: Firebase Client — `lib/firebase.ts`](#16-firebase-client) +17. [Frontend: Socket Context — `SocketContext.tsx`](#17-socket-context) +18. [Frontend: The Hook — `useNotifications.ts`](#18-usenotifications) +19. [Frontend: The UI — `NotificationDropdown.tsx`](#19-notification-dropdown) +20. [Environment Variables: Where Everything Comes From](#20-env-variables) +21. [Firebase Console: Every Step You Did](#21-firebase-console) +22. [The Full Journey: From Event to Bell Icon](#22-full-journey) +23. [What's Incomplete / Can Be Improved](#23-improvements) + +--- + +## 1. 
The Big Picture + +A notification system has **three fundamental jobs**: + +| Job | Mechanism | When | +|---|---|---| +| Tell the user *right now* while they're online | WebSocket / Socket.IO | Instant — milliseconds | +| Tell the user *even when they've closed the browser tab* | Push Notification (FCM) | Anytime, even offline | +| Let the user *revisit* old notifications | Database + REST API | On demand | + +Hayon does **all three simultaneously**. + +``` +Admin approves post + │ + ▼ +NotificationService.createNotification() + │ + ├──── 1. Save to MongoDB ──────► User reads history later via REST API + │ + ├──── 2. Socket.IO emit ───────► Bell icon updates in real-time (if online) + │ + └──── 3. FCM push ─────────────► Browser/device notification (even offline) +``` + +--- + +## 2. The Notification Stack in Hayon + +| Layer | Technology | File | +|---|---|---| +| Real-time protocol | Socket.IO (over WebSocket) | `config/socket.ts` | +| Push notifications | Firebase Cloud Messaging | `config/firebase.ts` | +| Persistence | MongoDB + Mongoose | `models/notification.model.ts` | +| Data access | Repository pattern | `repositories/notifications.repository.ts` | +| Business logic | Service layer | `services/notification.service.ts` | +| HTTP API | Express routes + controller | `routes/notification.routes.ts`, `controllers/notification.controller.ts` | +| FCM token management | Firebase controller | `controllers/firebase.controller.ts` | +| Frontend connection | React Context | `context/SocketContext.tsx` | +| Frontend state | Custom hook | `hooks/useNotifications.ts` | +| Frontend UI | React component | `components/NotificationDropdown.tsx` | +| Firebase client SDK | Firebase JS SDK | `lib/firebase.ts` | + +--- + +## 3. Why HTTP Falls Short — The Polling Problem + +### The Normal HTTP Request-Response Cycle + +``` +Browser Server + │──── GET /notifications ────►│ + │◄─── 200 OK [data] ──────────│ +``` + +HTTP is **stateless** and **unidirectional**. 
The server can ONLY respond — it cannot initiate. So how would the server tell you about a new notification when it happens? + +**Option A: Polling (Bad)** +``` +every 5 seconds: + browser → GET /notifications → server +``` +- Hammers the server with useless requests +- Still delayed by up to 5 seconds +- Wastes bandwidth, CPU, and money + +**Option B: Long Polling (Better, still ugly)** +``` +Browser: "Hey, respond only when something new happens" +Server: ... holds connection open for 30s ... +Server: "OK now something happened" → responds +Browser: immediately opens another long-poll connection +``` +- Better latency, but still overhead per connection +- Hard to scale + +**Option C: WebSockets (The Right Way)** +``` +Browser ──── Upgrade: websocket ────► Server +Browser ◄═══════ persistent tunnel ══════════► Server + (both sides can send at any time) +``` + +--- + +## 4. WebSockets — The Protocol + +### The Handshake + +WebSocket starts as an HTTP request with a special header: + +``` +GET /socket HTTP/1.1 +Host: dev.hayon.site:5000 +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Sec-WebSocket-Version: 13 +``` + +The server responds: +``` +HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +``` + +`101 Switching Protocols` = "OK, we're no longer doing HTTP. We're now in WebSocket mode." + +After this handshake, the TCP connection stays open. Both client and server can send **frames** at any time. No more request-response cycle. + +### WebSocket Frames + +Data is sent in frames, not HTTP bodies. Each frame has: +- Opcode (is this text? binary? ping? close?) 
+- Payload length +- Masking key (client → server messages are always masked for security) +- Actual data + +### Raw WebSocket vs Socket.IO + +| Feature | Raw WebSocket | Socket.IO | +|---|---|---| +| Auto-reconnect | ❌ You write it | ✅ Built-in | +| Named events | ❌ Just message strings | ✅ `emit("notification", data)` | +| Rooms / namespaces | ❌ You write it | ✅ Built-in | +| Fallback (polling) | ❌ | ✅ Falls back to polling if WS fails | +| Heartbeat / ping-pong | ❌ You write it | ✅ Built-in | +| Middleware | ❌ | ✅ `io.use((socket, next) => ...)` | + +Hayon uses **Socket.IO** because of rooms (each user is in their own room = their userId). + +--- + +## 5. Socket.IO — WebSockets With Superpowers + +### Core Concepts + +**Server** (`socket.io` npm package): +```typescript +import { Server } from "socket.io"; +const io = new Server(httpServer); +``` + +**Client** (`socket.io-client` npm package): +```typescript +import { io } from "socket.io-client"; +const socket = io("https://api.example.com"); +``` + +### Rooms + +A **room** is a named channel. A socket can join multiple rooms. When you emit to a room, all sockets in that room receive it. + +```typescript +socket.join("room-name"); // join a room +io.to("room-name").emit("event"); // emit to everyone in room +``` + +In Hayon: +- Each user joins a room named after **their own userId** +- When a notification is created for `recipientId`, we emit to `io.to(recipientId)` +- Only that user's browser receives it + +### Namespaces + +A namespace is a communication channel that allows you to split the logic over a single shared connection. We don't use namespaces in Hayon (using the default `/` namespace). + +### Socket.IO Middleware + +```typescript +io.use((socket, next) => { + // This runs before any connection is established + // next() = allow connection + // next(new Error()) = reject connection +}); +``` + +This is how we authenticate: check the JWT from `socket.handshake.auth.token` before letting the user join. 
+ +--- + +## 6. Firebase Cloud Messaging (FCM) — Push Notifications + +### What Problem Does FCM Solve? + +Socket.IO = great when browser tab is **open**. +FCM = works even when browser tab is **closed**. + +When a user closes your app but a post gets approved, how do you notify them? The browser is still running (as a background process on the OS) and can receive **web push notifications** even if no tab is open. + +### The Architecture + +``` +Your Backend (Node.js) + │ + │ POST to Google's FCM API + │ (using firebase-admin SDK) + ▼ + Google FCM Servers + │ + │ Push delivery + ▼ + User's Browser / Device + │ + │ Service Worker receives it + ▼ + OS-level notification popup +``` + +### The Token: The Device Address + +FCM identifies each browser/device with a unique **FCM token** (also called registration token). Think of it like a phone number for that specific browser on that specific device. + +- Changes when: user clears site data, reinstalls browser, token expires +- Multiple tokens per user = user logged in on multiple devices/browsers +- This is why `fcmTokens` in the user model is an **array** `[String]` + +### How Your Browser Gets Registered + +1. User opens Hayon +2. Browser asks: "Can we send you notifications?" +3. User clicks "Allow" +4. Firebase SDK generates a unique FCM token for this browser +5. Frontend sends this token to your backend via `POST /api/firebase/save-token` +6. Backend saves it to `user.fcmTokens[]` in MongoDB +7. Later when a notification fires, backend sends to ALL tokens for that user + +### VAPID Key + +`NEXT_PUBLIC_VAPID_KEY` is the **Voluntary Application Server Identification** key. This is part of the **Web Push Protocol** (RFC 8292). It proves to the browser that the push message came from *your* server, not some attacker. FCM uses this to authorize your backend to send messages to browsers subscribed to your app. + +--- + +## 7. The Two Firebase SDKs + +This is a common confusion point. 
There are **two completely different Firebase SDKs**: + +| | Firebase Admin SDK | Firebase Client SDK | +|---|---|---| +| Used in | Backend (Node.js) | Frontend (Browser) | +| Package | `firebase-admin` | `firebase` | +| Auth | Service Account JSON (private key) | Firebase web config (public keys) | +| Powers | Sending messages, managing users | Receiving token, subscribing | +| Your file | `backend/src/config/firebase.ts` | `frontend/src/lib/firebase.ts` | + +**Admin SDK**: Authenticated with a **private service account** that has God-mode access. Used to SEND push notifications via `admin.messaging().sendEachForMulticast()`. + +**Client SDK**: Used in the browser to get the FCM token via `getToken(messaging, { vapidKey })`. This token is then handed to your backend to store. + +--- + +## 8. The Notification Data Model + +**File**: `backend/src/models/notification.model.ts` + +```typescript +export interface INotification extends Document { + recipient: mongoose.Types.ObjectId; + type: "info" | "warning" | "success" | "error"; + message: string; + read: boolean; + image?: string; + link?: string; + relatedResource?: { + type: "post" | "login"; + id: mongoose.Types.ObjectId; + model: "Post" | "RefreshToken"; + }; + createdAt: Date; +} +``` + +### Line-by-Line Schema Explanation + +```typescript +import mongoose, { Schema, Document } from "mongoose"; +``` +- `Schema` = the blueprint class for defining a MongoDB document shape +- `Document` = Mongoose's base interface for MongoDB documents (adds `_id`, `save()`, etc.) 
+ +```typescript +recipient: { type: Schema.Types.ObjectId, ref: "User", required: true }, +``` +- `Schema.Types.ObjectId` = a 12-byte MongoDB identifier (not a plain string) +- `ref: "User"` = tells Mongoose "when you `.populate()` this field, look in the `User` collection" +- `required: true` = a notification without a recipient makes no sense + +```typescript +type: { + type: String, + enum: ["info", "warning", "success", "error"], + default: "info", +}, +``` +- `enum` = MongoDB-level constraint. Mongo will reject any value not in this list +- These four types map to the icon shown in the frontend (info=blue, warning=yellow, success=green, error=red) + +```typescript +read: { type: Boolean, default: false }, +``` +- Every notification starts unread. The pulsing red dot on the bell uses this. + +```typescript +image?: string; +link?: string; +``` +- `image` = optional S3 URL of the post's media (shown as thumbnail in the dropdown) +- `link` = optional URL to navigate to when the notification is clicked + +```typescript +relatedResource: { + type: { type: String, enum: ["post", "login"] }, + id: { + type: Schema.Types.ObjectId, + required: true, + refPath: "relatedResource.model", // ← dynamic populate! + }, + model: { + type: String, + required: true, + enum: ["Post", "RefreshToken"], + }, +}, +``` +- `refPath` = **dynamic population**. Instead of hardcoding `ref: "Post"`, we look at the `model` field at runtime to know which collection to populate from. This single notification schema can link to EITHER a `Post` OR a `RefreshToken` document. + +```typescript +{ timestamps: true } +``` +- Mongoose automatically adds `createdAt` and `updatedAt` fields +- `createdAt` is used in the frontend: `formatDistanceToNow(new Date(notification.createdAt))` + +--- + +## 9. 
The Notification Repository + +**File**: `backend/src/repositories/notifications.repository.ts` + +```typescript +export class NotificationRepository { + static async create(data: Partial) { + return Notification.create(data); + } +``` +- `Notification.create(data)` = inserts one document into MongoDB and returns the saved document with `_id` and timestamps populated +- `Partial` = we don't need to provide ALL fields — some have defaults (`read: false`) + +```typescript + static async findByUserId(userId: string, limit: number, skip: number) { + return Notification.find({ recipient: userId }) + .sort({ createdAt: -1 }) + .skip(skip) + .limit(limit) + .populate("relatedResource.id"); + } +``` +- `find({ recipient: userId })` = query: only get notifications belonging to this user +- `.sort({ createdAt: -1 })` = `-1` = descending = newest first +- `.skip(skip)` = pagination: skip the first N results +- `.limit(limit)` = at most return N results +- `.populate("relatedResource.id")` = replace the ObjectId stored in `relatedResource.id` with the full document from the referenced collection (Post or RefreshToken). This is how the frontend can read `notification.relatedResource.id?.content?.mediaItems?.[0]?.s3Url` — after populate, `.id` is the actual Post object, not just an ObjectId. + +```typescript + static async countByUserId(userId: string) { + return Notification.countDocuments({ recipient: userId }); + } +``` +- `countDocuments` = optimized count query. Does NOT fetch all documents — just returns the number +- Used to calculate total pages for pagination + +```typescript + static async markAsRead(notificationId: string, userId: string) { + return Notification.findOneAndUpdate( + { _id: notificationId, recipient: userId }, + { read: true }, + { new: true }, + ); + } +``` +- We query by BOTH `_id` AND `recipient` (the userId). Why? **Security.** Without the `recipient` check, any user could mark any other user's notification as read by guessing an ObjectId. 
+- `{ new: true }` = return the updated document, not the old one + +```typescript + static async markAllAsRead(userId: string) { + return Notification.updateMany({ recipient: userId, read: false }, { read: true }); + } +``` +- `updateMany` = update all matching documents in one database roundtrip (efficient) +- `read: false` filter = only update unread ones (no-op on already-read notifications) + +--- + +## 10. Socket.IO Config — `config/socket.ts` + +Full file, line by line: + +```typescript +import { Server, Socket } from "socket.io"; +``` +- `Server` = the Socket.IO server class. You wrap your HTTP server with this. +- `Socket` = represents one individual client connection. Each browser tab = one Socket. + +```typescript +import { Server as HttpServer } from "http"; +``` +- We alias it `HttpServer` to avoid name collision with Socket.IO's `Server`. Both are called `Server` in their respective packages. + +```typescript +import jwt from "jsonwebtoken"; +import { ENV } from "./env"; +import logger from "../utils/logger"; +``` +- We import JWT to **verify tokens** inside Socket.IO middleware +- This is authentication on the WebSocket layer — same logic as HTTP middleware but for sockets + +```typescript +interface AuthTokenPayload { + id: string; + role: string; +} +``` +- TypeScript tells us what shape the decoded JWT has. We know our JWTs contain `id` and `role`. + +```typescript +export const initSocket = (httpServer: HttpServer) => { + const io = new Server(httpServer, { + cors: { + origin: [ENV.APP.FRONTEND_URL, "localhost:3000"], + credentials: true, + }, + }); +``` +- `new Server(httpServer, ...)` = Socket.IO **attaches itself to the existing HTTP server**. It does NOT create a new port. Both HTTP (Express) and WebSocket traffic go through the same port (5000). The HTTP upgrade mechanism distinguishes WebSocket connections. +- `cors` = Socket.IO has its own CORS layer separate from Express's CORS. 
Yes, you need to configure it twice — one for HTTP, one for WebSocket. +- `credentials: true` = allow cookies to be sent (needed for authentication) + +```typescript + io.use((socket: Socket, next) => { + const token = socket.handshake.auth.token; +``` +- `io.use(...)` = **global middleware** that runs for EVERY connection attempt before `io.on("connection")` fires +- `socket.handshake` = the HTTP handshake data from the initial upgrade request +- `socket.handshake.auth` = a special object from Socket.IO's client where you can pass auth data. On the frontend: `io(url, { auth: { token } })` +- This is NOT a cookie or header — it's a Socket.IO-specific auth object sent during connection negotiation + +```typescript + if (!token) { + return next(new Error("Authentication error")); + } +``` +- `next(new Error(...))` = **reject this connection**. The client will get a `connect_error` event. No connection is established. +- `return next(null)` or just `next()` = allow the connection + +```typescript + try { + const decoded = jwt.verify(token, ENV.AUTH.ACCESS_TOKEN_SECRET) as AuthTokenPayload; + socket.data.user = decoded; + return next(); + } catch (err) { + logger.error("Authentication error", err); + return next(new Error("Authentication error")); + } +``` +- `jwt.verify(token, secret)` = cryptographically validates the token and decodes it. Throws if expired or tampered. +- `socket.data.user = decoded` = **attaches data to the socket object** for use later. `socket.data` is a per-socket store — safe to add arbitrary properties here. 
+- `as AuthTokenPayload` = TypeScript cast — we're telling TS "trust us, this is the right shape" + +```typescript + io.on("connection", (socket: Socket) => { + const userId = socket.data.user?.id; + if (userId) { + logger.info(`User connected to socket: ${userId}`); + socket.join(userId); + } +``` +- `io.on("connection", ...)` = fires for every successfully authenticated connection (middleware already passed) +- `socket.data.user?.id` = reads the user data we attached in middleware. Optional chain `?.` guards against impossible case where middleware didn't set it. +- `socket.join(userId)` = **this is the magic line**. The socket joins a room named after the userId. Later, `io.to(recipientId).emit("notification", data)` === "emit to the room named recipientId" === "emit to all browser tabs this user has open" + +```typescript + socket.on("disconnect", () => { + logger.info(`User disconnected: ${userId}`); + }); + }); + + return io; +}; +``` +- `socket.on("disconnect")` = fires when this tab closes, network drops, or user explicitly disconnects +- We return `io` so `app.ts` can export it globally: `export let io: any;` + +--- + +## 11. app.ts — Wiring Everything Together + +```typescript +export let io: any; +``` +**Line 46.** This is a global mutable variable. It starts as `undefined`. It gets assigned when the server starts. Why export it? So `notification.service.ts` can import it: +```typescript +import { io } from "../app"; +``` +This is a **circular-import-friendly** pattern: the service doesn't import from socket config, it imports the already-initialized `io` from `app.ts`. 
+ +```typescript +if (ENV.APP.NODE_ENV === "production") { + const httpServer = createServer(expressInstance); + httpServer.listen(ENV.APP.PORT, () => { + io = initSocket(httpServer); + }); +} else { + const httpsServer = https.createServer(options, expressInstance); + httpsServer.listen(ENV.APP.PORT, () => { + io = initSocket(httpsServer); + }); +} +``` +- Production = plain HTTP (because Nginx/load balancer handles SSL termination) +- Development = HTTPS directly (using your `dev.hayon.site` SSL certs from mkcert) +- `initSocket` is called **inside the `.listen()` callback** — this guarantees the server is actually listening before Socket.IO attaches. If you called it before `.listen()`, Socket.IO would attach to a server that hasn't started yet. +- `io = initSocket(...)` — this is where the global `io` gets its value. + +--- + +## 12. Firebase Admin Setup — `config/firebase.ts` + +```typescript +import admin from "firebase-admin"; +import serviceAccount from "../serviceAccountKey.json"; + +admin.initializeApp({ + credential: admin.credential.cert(serviceAccount as admin.ServiceAccount), +}); + +export default admin; +``` + +**Line 1**: `firebase-admin` is the **server-side** Firebase SDK. It has elevated privileges. + +**Line 2**: `serviceAccountKey.json` is downloaded from Google Cloud. It contains: +- `type: "service_account"` — Google knows this is a machine, not a human +- `project_id: "hayon-app"` — which Firebase project +- `private_key_id` — ID of the private key used for signing +- `private_key` — the actual RSA private key (BEGIN PRIVATE KEY). This is what Google uses to VERIFY that push notification requests are from you. +- `client_email` — the service account's email (like a username): `firebase-adminsdk-fbsvc@hayon-app.iam.gserviceaccount.com` +- Various OAuth/token URLs + +**Line 4-6**: `admin.initializeApp({ credential: admin.credential.cert(...) })` — initializes the Firebase Admin SDK with service account credentials. 
This must be called ONCE before any `admin.messaging()` calls. It authenticates against Google's servers using the private key. + +**Line 8**: `export default admin` — we export the initialized admin object so it can be imported in `notification.service.ts`. + +> ⚠️ **IMPORTANT**: `serviceAccountKey.json` is in your `.gitignore`. It contains a private key — if this leaks, someone can send push notifications as Hayon to all your users. Never commit it. + +--- + +## 13. The Service Layer — `notification.service.ts` + +This is the **brain** of the notification system. Every notification in the app goes through this one function. + +```typescript +import { INotification } from "../models/notification.model"; +import mongoose from "mongoose"; +import { io } from "../app"; +import { NotificationRepository } from "../repositories/notifications.repository"; +import admin from "../config/firebase"; +import User from "../models/user.model"; +``` + +- `import { io } from "../app"` — imports the initialized Socket.IO server instance. At the time this module is imported, `io` may still be `undefined` (before `.listen()` fires). The `if (io)` check on line 42 guards this. +- `import admin from "../config/firebase"` — the initialized Firebase Admin SDK + +### `createNotification()` — The Main Method + +```typescript +static async createNotification( + recipientId: string, + message: string, + type: INotification["type"] = "info", + relatedResource?: { + type: "post" | "login"; + id: string | mongoose.Types.ObjectId; + model: "Post" | "RefreshToken"; + }, + options?: { + image?: string; + link?: string; + }, +) +``` +- `recipientId: string` — MongoDB ObjectId as string (of the user who will receive it) +- `INotification["type"]` — TypeScript utility type. Instead of rewriting `"info" | "warning" | "success" | "error"`, we reference the existing type on the interface. If the interface changes, this auto-updates. 
+- `= "info"` — default parameter: if caller doesn't pass a type, it defaults to info +- `relatedResource?` — the `?` means optional. Destructured with nested types inline. +- `options?` — optional bag of extra fields (image URL and link) + +```typescript +const notificationData: Partial = { + recipient: new mongoose.Types.ObjectId(recipientId), + message, + type, + image: options?.image, + link: options?.link, + relatedResource: relatedResource + ? { + ...relatedResource, + id: new mongoose.Types.ObjectId(relatedResource.id), + } + : undefined, +}; +``` +- `Partial` — TypeScript allows leaving fields undefined (since some have defaults in the schema) +- `new mongoose.Types.ObjectId(recipientId)` — converts string `"65a3b..."` to MongoDB ObjectId type. Mongoose can often handle strings, but being explicit is correct. +- `options?.image` — optional chaining: if `options` is undefined, this returns `undefined` instead of throwing +- The `relatedResource` ternary: if we have a relatedResource, spread its properties but **override** the `id` with a freshly cast ObjectId. If not provided, store `undefined`. + +```typescript +const notification = await NotificationRepository.create(notificationData); +``` +- Delegates to repository. Repository calls `Notification.create()` → MongoDB insert → returns saved document with `_id` and timestamps. 
+ +```typescript +if (io) { + io.to(recipientId).emit("notification", notification); +} +``` +- `if (io)` — guards against the edge case where the socket server hasn't started yet (very unlikely in practice, but defensive) +- `io.to(recipientId)` — selects the room named after the user's ID (remember: in `socket.ts`, we did `socket.join(userId)`) +- `.emit("notification", notification)` — sends the event named `"notification"` with the full notification document as payload to ALL sockets in that room (all browser tabs the user has open) +- On the frontend, `socket.on("notification", handleNotification)` catches this + +```typescript +const title = + type === "success" + ? "Hayon - Your post has been approved " + : type === "error" + ? "Hayon - Your post has been rejected" + : "Hayon - New Notification"; +await this.sendPushNotification(recipientId, message, title, options?.image, options?.link); +``` +- Constructs a human-friendly OS notification title based on the type +- Calls the push notification method with the resolved title + +### `sendPushNotification()` — The FCM Method + +```typescript +const user = await User.findById(recipientId).select("fcmTokens"); +``` +- Direct Model access (not through repository) — this is intentional: it's a lightweight read inside the same service +- `.select("fcmTokens")` — only fetch the `fcmTokens` field, not the entire user document. Efficient. + +```typescript +if (!user || !user.fcmTokens || user.fcmTokens.length === 0) { + return; +} +``` +- Three failure conditions, all short-circuit: + 1. User doesn't exist + 2. `fcmTokens` field is null/undefined (shouldn't happen given default `[]`, but defensive) + 3. 
Array is empty (user never granted notification permission or cleared site data) + +```typescript +const messagePayload: any = { + notification: { + title, + body: message, + }, + tokens: user.fcmTokens, +}; +``` +- This is the FCM **MulticastMessage** shape +- `notification.title` and `notification.body` = what appears in the OS notification popup +- `tokens` = array of FCM tokens — send to ALL of this user's devices simultaneously + +```typescript +if (image) { + messagePayload.notification.image = image; +} +if (link) { + messagePayload.webpush = { + fcm_options: { + link, + }, + }; +} +``` +- `notification.image` = URL of image to show in the push notification (only on supported browsers) +- `webpush.fcm_options.link` = when user clicks the OS notification, open this URL. This is a **Web Push specific** option — only applies to FCM messages targeting web browsers (not mobile apps which use different options). + +```typescript +const response = await admin.messaging().sendEachForMulticast(messagePayload); +``` +- `admin.messaging()` = returns the Firebase Messaging instance +- `.sendEachForMulticast(messagePayload)` = sends the message to each token individually (even though it looks like one call). Why individually? So we can get per-token success/failure. +- Returns a `BatchResponse` with `responses[]` array (one per token) and `failureCount` + +```typescript +if (response.failureCount > 0) { + const failedTokens: string[] = []; + response.responses.forEach((resp: any, idx: number) => { + if (!resp.success) { + failedTokens.push(user.fcmTokens[idx]); + } + }); + if (failedTokens.length > 0) { + await User.updateOne( + { _id: recipientId }, + { $pull: { fcmTokens: { $in: failedTokens } } }, + ); + } +} +``` +- We iterate the responses. 
`idx` maps 1:1 with `user.fcmTokens` (same order) +- If a token failed, we collect it in `failedTokens` +- `$pull: { fcmTokens: { $in: failedTokens } }` = MongoDB atomic array operation: remove all elements from the `fcmTokens` array that appear in `failedTokens` +- Why? Because failed tokens usually mean the token is **stale** — user cleared browser data, uninstalled, or the token expired. Keeping them wastes FCM API calls. + +--- + +## 14. Firebase Controller — `firebase.controller.ts` + +### `saveToken` + +```typescript +export const saveToken = async (req: Request, res: Response) => { + const { token } = req.body; + const userId = req.auth?.id; +``` +- `token` from `req.body` = the FCM registration token sent by the frontend after the user grants notification permission +- `req.auth?.id` = set by the `authenticate` middleware from the passport JWT strategy + +```typescript + await updateUser(userId, token); +``` + +This calls the repository function: +```typescript +export const updateUser = async (userId: string, token: string) => { + return User.findByIdAndUpdate(userId, { $push: { fcmTokens: token } }); +}; +``` +- `$push` = MongoDB atomic operator: appends `token` to the `fcmTokens` array without loading the whole document +- This means if the same user opens Hayon on 3 devices, they'll have 3 tokens → they get push notifications on all 3 + +### `getToken` + +```typescript +export const getToken = async (req: Request, res: Response) => { + const user = await findUserByIdSafe(userId); + return new SuccessResponse("Token fetched successfully", { data: user.fcmTokens }).send(res); +}; +``` +- Returns the stored FCM tokens for the logged-in user +- `findUserByIdSafe` uses Redis cache (cache-aside pattern) — fast + +--- + +## 15. Routes + +### `notification.routes.ts` + +```typescript +router.use(authenticate); +``` +- Applies auth middleware to ALL routes below it. You must be logged in to read/manage your own notifications. 
+ +```typescript +router.get("/", NotificationController.getNotifications); +router.patch("/:id/read", NotificationController.markRead); +router.patch("/read-all", NotificationController.markAllRead); +``` + +> ⚠️ **Route Order Matters**: `"/read-all"` must be registered BEFORE `"/:id/read"`. If you put `/:id` first, Express would match `read-all` thinking `read-all` is the `:id` parameter! + +### `firebase.routes.ts` + +```typescript +router.post("/save-token", saveToken); +router.get("/get-token", getToken); +router.post("/send-to-all-users", sendPushToUser); +``` + +- `save-token` = called when browser grants notification permission +- `get-token` = debug endpoint to see stored tokens +- `send-to-all-users` = admin broadcast endpoint (currently returns 501 Not Implemented) + +--- + +## 16. Firebase Client — `frontend/src/lib/firebase.ts` + +```typescript +import { initializeApp } from "firebase/app"; +import { getMessaging } from "firebase/messaging"; +``` +- `initializeApp` = Firebase Client SDK initialization (not Admin SDK!) +- `getMessaging` = returns the Firebase Cloud Messaging instance for the browser — used to get FCM tokens + +```typescript +const firebaseConfig = { + apiKey: process.env.NEXT_PUBLIC_FIREBASE_API_KEY, + authDomain: process.env.NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN, + projectId: process.env.NEXT_PUBLIC_FIREBASE_PROJECT_ID, + storageBucket: process.env.NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET, + messagingSenderId: process.env.NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID, + appId: process.env.NEXT_PUBLIC_FIREBASE_APP_ID, + measurementId: process.env.NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID, +}; +``` +- All `NEXT_PUBLIC_` = exposed to the browser (Next.js convention) +- These are **not secrets**. The Firebase config is a public identifier for your Firebase project. Security is enforced by Firebase security rules and Auth, not by keeping these keys secret. +- `messagingSenderId` = the GCM Sender ID. 
The browser uses this to know which Firebase project to subscribe to. + +```typescript +const app = initializeApp(firebaseConfig); + +let messaging: any = null; + +if (typeof window !== "undefined") { + messaging = getMessaging(app); +} +``` +- `initializeApp(firebaseConfig)` = creates the Firebase app instance +- `typeof window !== "undefined"` guards against **SSR** (Server-Side Rendering). Next.js runs your code on the server too. `getMessaging` requires a browser environment (it uses Service Workers internally). Without this guard, Next.js would crash on the server. +- `messaging` starts as `null`, then becomes the Messaging instance on the client + +```typescript +export { messaging }; +``` +- Exported for use in components that call `getToken(messaging, { vapidKey })` to get the FCM registration token + +--- + +## 17. The Socket Context — `context/SocketContext.tsx` + +```typescript +"use client"; +``` +- Next.js directive: this component only runs in the browser, not on the server. Required because we use browser APIs (`localStorage`) and WebSocket connections. + +```typescript +import { io, Socket } from "socket.io-client"; +``` +- `socket.io-client` = the CLIENT side of Socket.IO. Different package from the server's `socket.io`. +- `io` = factory function to create a socket connection +- `Socket` = TypeScript type for the socket instance + +```typescript +interface SocketContextType { + socket: Socket | null; + isConnected: boolean; +} +const SocketContext = createContext({ + socket: null, + isConnected: false, +}); +``` +- React Context definition. Default value = `{ socket: null, isConnected: false }` (used if a component consumes the context outside a Provider — defensive default) +- We store the socket instance in React Context so any component deep in the tree can access it without prop-drilling + +```typescript +export const useSocket = () => useContext(SocketContext); +``` +- A simple hook that abstracts `useContext(SocketContext)`. 
Any component can do `const { socket } = useSocket()` instead of importing both `useContext` and `SocketContext`. + +```typescript +export const SocketProvider = ({ children }: { children: React.ReactNode }) => { + const [socket, setSocket] = useState(null); + const [isConnected, setIsConnected] = useState(false); +``` +- `SocketProvider` wraps the app. It creates the socket connection once and shares it. +- `useState(null)` = starts as `null` (no connection yet) + +```typescript + useEffect(() => { + const token = localStorage.getItem("accessToken"); + if (!token) return; +``` +- `useEffect` with `[]` dep array = runs once when the component mounts (once on page load) +- `localStorage.getItem("accessToken")` = retrieves the JWT. If no token, the user isn't logged in → no socket connection needed → early return +- The JWT is stored in `localStorage` by your auth logic on login + +```typescript + const socketInstance = io(process.env.NEXT_PUBLIC_API_URL || "http://localhost:5000", { + auth: { token }, + withCredentials: true, + }); +``` +- `io(url, options)` = establishes the Socket.IO connection to your backend +- `process.env.NEXT_PUBLIC_API_URL` = `"https://dev.hayon.site:5000/api"` from `.env.local`. Wait — this is the API URL WITH `/api`. But Socket.IO doesn't go through `/api`. So the socket actually connects to `https://dev.hayon.site:5000` (Socket.IO strips the path automatically when using the default namespace) +- `auth: { token }` = this is what `socket.handshake.auth.token` reads on the server in `socket.ts` middleware. You're passing the JWT here. 
+- `withCredentials: true` = send cookies along with the WebSocket upgrade request + +```typescript + socketInstance.on("connect", () => { + setIsConnected(true); + console.log("Socket connected"); + }); + + socketInstance.on("disconnect", () => { + setIsConnected(false); + console.log("Socket disconnected"); + }); + + setSocket(socketInstance); +``` +- `"connect"` = Socket.IO event fired when connection is established and server has accepted it +- `"disconnect"` = fired when connection drops (network issue, server restart, etc.) +- `setSocket(socketInstance)` = stores the socket in React state, triggering re-render, making it available to all consumers of the context + +```typescript + return () => { + socketInstance.disconnect(); + }; + }, []); +``` +- `useEffect` cleanup function: called when the `SocketProvider` unmounts (page navigation, app teardown) +- `disconnect()` = cleanly closes the WebSocket connection, freeing server resources +- Without this, you'd have zombie connections on the server + +--- + +## 18. The Hook — `useNotifications.ts` + +```typescript +export interface Notification { + _id: string; + message: string; + type: "info" | "warning" | "success" | "error"; + read: boolean; + image?: string; + link?: string; + relatedResource?: { + type: "post" | "login"; + id: any; // Populated post or login data + model: string; + }; + createdAt: string; +} +``` +- Frontend TypeScript interface mirroring the backend Mongoose model +- `id: any` — after `.populate()` on the backend, this is the full Post object (not just ObjectId). `any` is used because the populated shape varies. 
+- `createdAt: string` — in JSON responses, Mongoose Date objects become ISO 8601 strings: `"2024-02-21T09:45:31.000Z"` + +```typescript +export const useNotifications = () => { + const { socket } = useSocket(); + const [notifications, setNotifications] = useState([]); + const [unreadCount, setUnreadCount] = useState(0); +``` +- Gets the socket from context via `useSocket()` +- Two pieces of state: the array of notifications and the count of unread ones + +### Initial Fetch (REST API) + +```typescript + useEffect(() => { + const fetchNotifications = async () => { + try { + const res = await api.get(`/notifications`); + setNotifications(res.data.data.notifications); + setUnreadCount(res.data.data.notifications.filter((n: any) => !n.read).length); + } catch (err) { + console.error(err); + } + }; + fetchNotifications(); + }, []); +``` +- On mount, fetch historic notifications from REST API +- `res.data.data.notifications` = the response shape is `{ success: true, message: "...", data: { notifications: [], total: N, page: 1, pages: N } }` +- `.filter((n) => !n.read).length` = count locally by filtering — avoids an extra API call + +### Real-Time Listener (Socket.IO) + +```typescript + useEffect(() => { + if (!socket) return; + + const handleNotification = (newNotification: Notification) => { + setNotifications((prev) => [newNotification, ...prev]); + setUnreadCount((prev) => prev + 1); + }; + + socket.on("notification", handleNotification); + + return () => { + socket.off("notification", handleNotification); + }; + }, [socket]); +``` +- `if (!socket) return` — if socket isn't connected yet, don't try to listen +- `[socket]` in dep array = effect re-runs when the socket changes (when it connects or reconnects) +- `socket.on("notification", handleNotification)` = register listener for the `"notification"` event (exactly what the server emits) +- `(prev) => [newNotification, ...prev]` = **functional state update** (safe in React). 
Prepends new notification to the front of the array. +- `prev + 1` = increment unread count +- **Cleanup**: `socket.off("notification", handleNotification)` = remove this specific listener when socket changes or component unmounts. Without this, you'd register duplicate listeners every time the socket reconnects. + +### Mark As Read + +```typescript + const markAsRead = async (id: string) => { + try { + await api.patch(`/notifications/${id}/read`); + setNotifications((prev) => prev.map((n) => (n._id === id ? { ...n, read: true } : n))); + setUnreadCount((prev) => Math.max(0, prev - 1)); + } catch (err) { + console.error(err); + } + }; +``` +- PATCH to the API first (source of truth) +- Then **optimistic local update**: mutate state locally without waiting for another fetch. `prev.map(...)` creates a new array where only the clicked notification has `read: true`. +- `Math.max(0, prev - 1)` — never go negative (defensive) + +```typescript + const markAllAsRead = async () => { + await api.patch("/notifications/read-all"); + setNotifications((prev) => prev.map((n) => ({ ...n, read: true }))); + setUnreadCount(0); + }; +``` +- PATCH → set all to read locally → unreadCount = 0 + +--- + +## 19. The UI — `NotificationDropdown.tsx` + +### The Bell Button + +```typescript + +``` +- Conditional red dot: only renders if `unreadCount > 0` +- `animate-pulse` = Tailwind's pulsing animation — draws the user's eye +- `absolute` positioned relative to the `relative` button container + +### The Backdrop Blur + +```typescript + + {isOpen && ( +
+ )} + +``` +- `DropdownMenuPortal` = renders into a portal (outside the normal DOM tree, at the document body level). This prevents z-index/overflow issues. +- `fixed inset-0` = full screen overlay +- `backdrop-blur-[2px]` = blurs everything behind the dropdown (glassmorphism effect) +- `animate-in fade-in` = entrance animation using `tailwindcss-animate` + +### `highlightMessage()` — Text Coloring + +```typescript +const highlightMessage = (message: string) => { + const platforms = ["bluesky", "threads", "tumblr", "mastodon", "facebook", "instagram"]; + const statusKeywords = ["pending", "scheduled", "posted"]; + const allKeywords = [...platforms, ...statusKeywords]; + const regex = new RegExp(`(${allKeywords.join("|")})`, "gi"); + const parts = message.split(regex); + return parts.map((part, i) => { ... }); +}; +``` +- Builds a regex like `/(bluesky|threads|tumblr|mastodon|...|pending|scheduled|posted)/gi` +- `gi` = case insensitive, global (find all occurrences) +- `message.split(regex)` = splits the message INTO the matched parts. With a capturing group `()`, `split` includes the matched strings in the result array! +- Example: `"Your post on Instagram has been posted"` → `["Your post on ", "Instagram", " has been ", "posted", ""]` +- Then `parts.map(...)` wraps each matched keyword in a styled `` with its brand color + +### The Rich Media Layout + +```typescript +const postImage = + notification.image || + (notification.relatedResource?.type === "post" + ? notification.relatedResource.id?.content?.mediaItems?.[0]?.s3Url + : null); +const isPosted = notification.message.toLowerCase().includes("successfully posted"); +const useRichLayout = isPosted && postImage; +``` +- `notification.image` = explicit image URL from notification (if set) +- Falls back to `relatedResource.id?.content?.mediaItems?.[0]?.s3Url` — the populated Post document's first media item's S3 URL. This is why we `.populate("relatedResource.id")` in the repository. 
+- `useRichLayout` = true only for "successfully posted" notifications WITH an image → shows the full-width banner layout with post thumbnail + +--- + +## 20. Environment Variables — Where Each One Comes From + +### Backend — NO env vars needed for notifications specifically + +The notification system on the backend uses: +- `ENV.AUTH.ACCESS_TOKEN_SECRET` — for JWT verification in socket middleware (already exists for auth) +- `serviceAccountKey.json` — hardcoded file path, no env var + +### Frontend `.env.local` + +| Variable | Value | Where It Comes From | +|---|---|---| +| `NEXT_PUBLIC_API_URL` | `https://dev.hayon.site:5000/api` | Your backend server URL | +| `NEXT_PUBLIC_VAPID_KEY` | `BFWmxXmX5HUj7i...` | Firebase Console → Project Settings → Cloud Messaging → Web Push certificates | +| `NEXT_PUBLIC_FIREBASE_API_KEY` | `AIzaSyBFxGm...` | Firebase Console → Project Settings → General → Your apps → Web app config | +| `NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN` | `hayon-app.firebaseapp.com` | Same place, auto-generated | +| `NEXT_PUBLIC_FIREBASE_PROJECT_ID` | `hayon-app` | Your Firebase project ID | +| `NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET` | `hayon-app.firebasestorage.app` | Same place | +| `NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID` | `1094405093952` | Same place — the GCM Sender ID | +| `NEXT_PUBLIC_FIREBASE_APP_ID` | `1:1094405...` | Same place — unique to this web app registration | +| `NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID` | `G-XNDZBY...` | Google Analytics linked to Firebase | + +--- + +## 21. Firebase Console — Every Step You Took + +Here's exactly what you did on the Firebase Console to make this work: + +### Step 1: Create the Project +1. Go to [console.firebase.google.com](https://console.firebase.google.com) +2. Click "Add project" +3. Name it `hayon-app` (this becomes the `project_id`) +4. Enable/disable Google Analytics (you enabled it — that's where `measurementId` comes from) + +### Step 2: Register Your Web App +1. 
In the project overview, click the `</>` icon (Web)
+ +### Step 1: Admin Clicks "Approve" in Admin Panel +``` +POST /api/admin/posts/:id/approve +``` + +### Step 2: Admin Controller Calls NotificationService +```typescript +// Somewhere in admin posts handler: +await NotificationService.createNotification( + post.userId, // recipientId + `Your post "${post.title}" on Instagram has been posted successfully`, + "success", // type + { type: "post", id: post._id, model: "Post" }, // relatedResource + { + image: post.content.mediaItems[0]?.s3Url, // thumbnail + link: `/posts/${post._id}`, // click destination + } +); +``` + +### Step 3: Notification Service Runs (4 things in sequence) + +**3a. Build notification data object** +```typescript +{ + recipient: ObjectId("65a3b..."), + message: "Your post on Instagram...", + type: "success", + image: "https://hayon-bucket.s3.amazonaws.com/...", + link: "/posts/65a3b...", + relatedResource: { type: "post", id: ObjectId("65a3b..."), model: "Post" } +} +``` + +**3b. MongoDB Insert** +``` +MongoDB ← Notification.create(notificationData) +MongoDB returns: { _id: ObjectId("abc123"), ...allFields, createdAt: Date.now() } +``` + +**3c. Socket.IO Emit** (if user is online) +``` +io.to("65a3b...") ← room named after userId + .emit("notification", {...}) ← sends full notification document +``` + +**3d. FCM Push** (always attempted) +``` +MongoDB ← User.findById(recipientId).select("fcmTokens") +→ user.fcmTokens = ["eXBsK3...", "f9Kl2..."] (2 devices) + +Google FCM API ← admin.messaging().sendEachForMulticast({ + notification: { title: "Hayon - Your post has been approved", body: "..." 
}, + tokens: ["eXBsK3...", "f9Kl2..."] +}) + +Google FCM → User's Chrome on laptop (OS notification popup) +Google FCM → User's Firefox on phone (OS notification popup) +``` + +### Step 4: Frontend Receives (Two Paths) + +**Path A: User has tab open (Socket.IO)** +``` +socket.on("notification", handleNotification) + → setNotifications(prev => [newNotification, ...prev]) + → setUnreadCount(prev => prev + 1) + → Bell icon re-renders with red pulsing dot +``` + +**Path B: User's tab is closed (FCM)** +``` +Service Worker receives push message +→ OS shows notification: "Hayon - Your post has been approved" + "Your post on Instagram has been posted successfully" +→ User clicks notification → browser opens → navigates to /posts/65a3b... +``` + +### Step 5: User Opens Notification Dropdown +``` +Initial render: useEffect → GET /api/notifications +→ NotificationController.getNotifications +→ NotificationService.getUserNotifications +→ NotificationRepository.findByUserId (paginated, sorted, populated) +→ Returns: { notifications: [...], total: 5, page: 1, pages: 1 } +→ setNotifications([...]) +→ setUnreadCount(notifications.filter(n => !n.read).length) +``` + +### Step 6: User Clicks the Notification +``` +onClick handler: +1. markAsRead(notification._id) + → PATCH /api/notifications/abc123/read + → NotificationRepository.markAsRead (requires both _id AND userId — security!) + → setNotifications(prev => prev.map(n => n._id === id ? {...n, read: true} : n)) + → setUnreadCount(prev => Math.max(0, prev - 1)) + +2. notification.link = "/posts/65a3b..." + → router.push("/posts/65a3b...") + → Next.js navigates to that page +``` + +--- + +## 23. What's Incomplete / Can Be Improved + +### 1. `sendPushToUser` is not implemented +```typescript +export const sendPushToUser = async (req: Request, res: Response) => { + return new ErrorResponse("Not implemented").send(res); +}; +``` +The admin broadcast endpoint returns 501. 
You could implement: get all FCM tokens from all users, batch into groups of 500 (FCM multicast limit), send. + +### 2. Token Deduplication +`updateUser` uses `$push` blindly: +```typescript +User.findByIdAndUpdate(userId, { $push: { fcmTokens: token } }); +``` +If the same browser registers twice, you'd have duplicate tokens. Fix: +```typescript +User.findByIdAndUpdate(userId, { $addToSet: { fcmTokens: token } }); +``` +`$addToSet` = only adds if not already in the array. + +### 3. API URL vs Socket URL +```typescript +const socketInstance = io(process.env.NEXT_PUBLIC_API_URL || "http://localhost:5000", {...}); +``` +`NEXT_PUBLIC_API_URL` is `https://dev.hayon.site:5000/api`. Socket.IO would try to connect to `https://dev.hayon.site:5000/api` which is technically wrong — the socket endpoint is at the root `https://dev.hayon.site:5000`. Socket.IO usually handles this but it's cleaner to have a separate `NEXT_PUBLIC_SOCKET_URL` env var. + +### 4. No Notification Deletion +The UI lets you mark as read but never delete. Old notifications accumulate forever. You should add: +- `DELETE /api/notifications/:id` endpoint +- A cron job that deletes notifications older than 30 days + +### 5. Sound / Vibration +Push notifications can include sound. FCM supports `notification.sound` for mobile. Not implemented. + +### 6. Service Worker for Background Push +The frontend FCM setup (`lib/firebase.ts`) gets the token, but you need a `public/firebase-messaging-sw.js` **Service Worker** to actually show OS notifications when the tab is closed. Without a service worker, FCM push messages are only received when the tab is open (where Socket.IO already handles it). The service worker file would look like: +```javascript +importScripts('https://www.gstatic.com/firebasejs/10.0.0/firebase-app-compat.js'); +importScripts('https://www.gstatic.com/firebasejs/10.0.0/firebase-messaging-compat.js'); + +firebase.initializeApp({ messagingSenderId: "1094405093952", ... 
}); +const messaging = firebase.messaging(); +messaging.onBackgroundMessage((payload) => { + self.registration.showNotification(payload.notification.title, { + body: payload.notification.body, + icon: '/icon.png', + }); +}); +``` + +### 7. `console.log(res)` in Production +```typescript +const res = await api.get(`/notifications`); +console.log(res); +``` +This `console.log` in `useNotifications.ts` will log on every page load in production. Remove it. + +--- + +*End of Notifications Masterclass. Every line of code, every concept, every system interaction explained.* diff --git a/docs/S3_MASTERCLASS.md b/docs/S3_MASTERCLASS.md new file mode 100644 index 0000000..a7ad5ed --- /dev/null +++ b/docs/S3_MASTERCLASS.md @@ -0,0 +1,1250 @@ +# 🪣 AWS S3 — The Complete Masterclass +> **Project: Hayon** | Everything you need to know about S3, from zero to production + +--- + +## Table of Contents + +1. [What is AWS S3?](#1-what-is-aws-s3) +2. [Core S3 Concepts](#2-core-s3-concepts) +3. [IAM & Credentials — How AWS Knows Who You Are](#3-iam--credentials--how-aws-knows-who-you-are) +4. [Setting Up an S3 Bucket](#4-setting-up-an-s3-bucket) +5. [Bucket Policies & CORS — Controlling Access](#5-bucket-policies--cors--controlling-access) +6. [Presigned URLs — The Heart of Your Upload Strategy](#6-presigned-urls--the-heart-of-your-upload-strategy) +7. [How Files Are Stored (Key Structure)](#7-how-files-are-stored-key-structure) +8. [Your S3 Services — Full Line-by-Line Breakdown](#8-your-s3-services--full-line-by-line-breakdown) +9. [Upload Flow — Frontend to S3 to Database](#9-upload-flow--frontend-to-s3-to-database) +10. [Download Flow — How Images Are Viewed](#10-download-flow--how-images-are-viewed) +11. [Delete Flow — Cleaning Up Old Data](#11-delete-flow--cleaning-up-old-data) +12. [Where S3 Is Used in Hayon](#12-where-s3-is-used-in-hayon) +13. [Next.js & Image Optimization](#13-nextjs--image-optimization) +14. [Security Best Practices](#14-security-best-practices) +15. 
[Code Review — Mistakes & Improvements](#15-code-review--mistakes--improvements) + +--- + +## 1. What is AWS S3? + +**Amazon Simple Storage Service (S3)** is a cloud-based **object storage** service. Think of it like a hard drive on the internet, but with unlimited capacity, 99.999999999% (11 nines) durability, and global CDN support. + +### Object Storage vs. File Storage vs. Block Storage + +| Type | What it is | Example | Used for | +|------|-----------|---------|----------| +| **Block Storage** | Raw disk blocks, like a hard drive | AWS EBS | Databases, OS disks | +| **File Storage** | Folder/file hierarchy | NFS, Google Drive | Shared file systems | +| **Object Storage** | Flat key-value store (key → blob) | **AWS S3** | Images, videos, backups, static assets | + +In S3, there are no actual "folders". What looks like `profiles/user123.jpg` is just a **key**. The `/` is part of the key name — S3 console just renders it as a folder for your convenience. + +### Why Not Store Images in Your Database or EC2's Disk? + +| Approach | Problem | +|----------|---------| +| **Store in MongoDB (base64)** | Documents balloon in size. BSON limit is 16MB per document. Extremely slow queries. | +| **Store on EC2 disk** | When your EC2 crashes or restarts, the disk content is gone (if ephemeral). No CDN. No replication. | +| **Store in S3** ✅ | Durable, cheap, fast, CDN-ready, secure, and infinitely scalable. | + +--- + +## 2. Core S3 Concepts + +### Bucket +A **bucket** is a top-level container for your objects. Think of it as a "root folder" in the cloud. Buckets have globally unique names — no two buckets in all of AWS can have the same name. + +``` +hayon-app-images ← Bucket name +``` + +### Object +An **object** is anything stored in the bucket: a photo, video, PDF, JSON file. An object has: +- **Key**: The unique identifier (the "path") +- **Body**: The actual binary data +- **Metadata**: Extra info like `Content-Type`, `ETag`, etc. 
+- **ETag**: A hash of the object's content (like a fingerprint) + +### Key +The **key** is treated like a file path. It's just a string. Example: + +``` +profiles/64abc123.jpg +posts/64abc123/post_uuid/post-media.jpg +temp/64abc123/uuid.png +``` + +### Region +AWS S3 buckets exist in a specific **region** (geographical data center). In Hayon, you use: +``` +ap-south-1 ← Asia Pacific (Mumbai) +``` + +Keeping your bucket close to your EC2 instance reduces latency and eliminates data transfer costs. + +### URL Structure +Once a file is uploaded, it can be accessed publicly (if allowed) via: +``` +https://{bucket-name}.s3.{region}.amazonaws.com/{key} + +# Example: +https://hayon-app-images.s3.ap-south-1.amazonaws.com/profiles/user123.jpg +``` + +--- + +## 3. IAM & Credentials — How AWS Knows Who You Are + +**IAM (Identity and Access Management)** is AWS's permission system. Before your Node.js backend can do anything with S3, AWS needs to verify that your server is authorized. + +### The Flow + +``` +Your EC2 / Local Server + │ + ▼ + AWS SDK sends request + with: + - Access Key ID ← "who am I" + - Secret Access Key ← "my password" + │ + ▼ + AWS IAM checks: + "Does this key have permission to do this action on this bucket?" + │ + ├── YES → Request proceeds ✅ + └── NO → 403 AccessDenied ❌ +``` + +### Creating an IAM User for S3 + +1. Go to **IAM → Users → Create User** +2. Name it something like `hayon-s3-user` +3. Attach policy: `AmazonS3FullAccess` (or a custom restrictive policy — see security section) +4. Generate **Access Key** → download the `.csv` +5. Paste into your `.env`: +```env +AWS_ACCESS_KEY_ID=AKIA... +AWS_SECRET_ACCESS_KEY=abc123secret... 
+AWS_REGION=ap-south-1 +AWS_S3_BUCKET_NAME=hayon-app-images +``` + +### In Your `env.ts` + +```typescript +// backend/src/config/env.ts — Lines 70–75 +AWS: { + ACCESS_KEY_ID: required("AWS_ACCESS_KEY_ID"), // Line 71: Reads from .env, throws if missing + SECRET_ACCESS_KEY: required("AWS_SECRET_ACCESS_KEY"), // Line 72: Secret key for signing + REGION: required("AWS_REGION"), // Line 73: e.g., "ap-south-1" + S3_BUCKET_NAME: required("AWS_S3_BUCKET_NAME"), // Line 74: Your bucket name +}, +``` + +The `required()` helper (Lines 4–10) ensures your app **crashes at startup** if any env var is missing. This is brilliant — it's far better to fail fast than to have mysterious runtime errors 10 minutes into a request. + +--- + +## 4. Setting Up an S3 Bucket + +### Step-by-Step + +1. **Create Bucket**: + - Go to **S3 → Create Bucket** + - Name: `hayon-app-images` + - Region: `ap-south-1` + - **Uncheck "Block all public access"** (if you want public profile images — explained below) + +2. **Bucket Versioning**: Off (you don't need old versions for profile photos) + +3. **Encryption**: Default (S3-managed keys — free) + +### Public vs. Private Objects + +| Type | Access | Use Case in Hayon | +|------|--------|-------------------| +| **Public** | Anyone with the URL can view | Profile avatars | +| **Private** | Only via presigned URL or server-side | Post media (debatable) | + +> 🤔 **In Hayon's current setup**, files are uploaded with no explicit ACL, which means they inherit the bucket policy. If your bucket is configured to allow public reads (via bucket policy), then the plain `s3Url` returned by your functions works as a public URI. If the bucket is private (which is the secure default), then these plain URLs would return `403 Access Denied` when loaded in `` tags — and you'd need presigned download URLs instead. + +--- + +## 5. 
Bucket Policies & CORS — Controlling Access + +### Bucket Policy (Public Read for Profile Images) + +If you want profile images to be publicly accessible via their URL, you add a **Bucket Policy**: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "PublicReadForProfiles", + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::hayon-app-images/profiles/*" + } + ] +} +``` + +This says: "Allow ANYONE to `GetObject` (read) anything inside the `profiles/` prefix." The `*` in `Principal` means "all users — even unauthenticated ones." + +For post media, you might want `posts/*` to be private (require presigned URLs), while `profiles/*` is public. + +### CORS (Cross-Origin Resource Sharing) + +When the **browser** sends a `PUT` request directly to S3 (using a presigned URL), S3 must allow requests from your frontend domain. Without CORS, the browser will block it. + +Go to **S3 → Your Bucket → Permissions → CORS** and add: + +```json +[ + { + "AllowedHeaders": ["*"], + "AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"], + "AllowedOrigins": [ + "http://localhost:3000", + "https://yourproductiondomain.com" + ], + "ExposeHeaders": ["ETag"] + } +] +``` + +- `AllowedHeaders: ["*"]` — Allow any request header (like `Content-Type`) +- `AllowedMethods` — The HTTP methods S3 should allow +- `AllowedOrigins` — Which frontend domains can talk to S3 +- `ExposeHeaders: ["ETag"]` — Expose the ETag back to the browser (useful for verifying uploads) + +**Without this CORS config, your `uploadFiles()` function in `useCreatePost.ts` will fail with a CORS error** when the browser tries to `PUT` the file directly to S3. + +--- + +## 6. 
Presigned URLs — The Heart of Your Upload Strategy + +### The Problem Without Presigned URLs + +If your backend received the file and then uploaded it to S3: + +``` +User selects file + → sends file to your Express server (slow, wastes bandwidth) + → your server receives the file into memory + → your server uploads file to S3 + → slow, double bandwidth, server memory pressure +``` + +Your EC2 t2.micro would be crushed under video uploads. + +### The Presigned URL Solution + +A **presigned URL** is a temporary URL generated by your backend that allows the **browser to upload directly to S3**, bypassing your server for the actual file data: + +``` +User selects file + ↓ +Browser asks your backend: "I want to upload a 5MB JPG" + ↓ +Backend generates a pre-signed PUT URL (valid for 15 minutes) +Backend returns: { uploadUrl, s3Url, s3Key } + ↓ +Browser PUTs the file directly to that S3 URL + ↓ +Browser tells your backend: "I uploaded it, here's the s3Url" + ↓ +Backend saves the s3Url in MongoDB +``` + +**Your server never touches the file bytes.** This is the gold standard for file uploads in web applications. + +### How the Signature Works (Simplified) + +When you call `getSignedUrl()`, the AWS SDK: +1. Takes the S3 command parameters (bucket, key, content-type, expiry) +2. Signs them with your **secret access key** using HMAC-SHA256 +3. Encodes those parameters + signature into query string parameters + +The resulting URL looks like: +``` +https://hayon-app-images.s3.ap-south-1.amazonaws.com/posts/abc123/uuid.jpg + ?X-Amz-Algorithm=AWS4-HMAC-SHA256 + &X-Amz-Credential=AKID... + &X-Amz-Date=20260221T000000Z + &X-Amz-Expires=900 + &X-Amz-SignedHeaders=content-type%3Bhost + &X-Amz-Signature=abc123signature... +``` + +AWS verifies this signature when the browser calls it. If the signature is valid and not expired, the upload is allowed — **without S3 ever contacting your backend**. + +--- + +## 7. 
How Files Are Stored (Key Structure) + +### Your Key Naming Strategy + +``` +profiles/{userId}-{timestamp}.{ext} ← Profile pictures +temp/{userId}/{uuid}.{ext} ← Temporary post media (uploaded, not yet confirmed) +posts/{userId}/{postId}/{uuid}.{ext} ← Permanent post media (confirmed post) +``` + +### Why "temp" folder? + +When a user selects images in the Create Post form, you upload them immediately (for a responsive UI). But what if the user never clicks "Post"? You'd have orphaned files in S3 that waste storage. + +The `temp/` folder is meant to hold unconfirmed uploads. Ideally, you'd run a **lifecycle rule** or **S3 cleanup job** to delete files from `temp/` after 24–48 hours. + +> ⚠️ **Currently in your code**, `moveMediaToPermanent()` exists in `s3.upload.service.ts` but is **not actually called** during post creation. The `s3Url` from the temp folder is saved directly to MongoDB without moving it. This means your "temp" folder semantic is misleading — files are never actually moved to `posts/`. More on this in the review section. + +--- + +## 8. Your S3 Services — Full Line-by-Line Breakdown + +### `s3.service.ts` — The Core CRUD Service + +```typescript +// Line 1: Import the ENV config to get credentials/bucket name +import { ENV } from "../../config/env"; + +// Line 2: Import the AWS SDK v3 S3 client and commands +// SDK v3 uses a "command pattern": you create a command object and send it +import { S3Client, PutObjectCommand, DeleteObjectCommand } from "@aws-sdk/client-s3"; + +// Line 3: Import our custom TypeScript type for the upload response shape +import { UploadResponse } from "../../types/s3.types"; + +// Line 5: Declare a class — this is a service object +class S3Service { + // Line 6: Private field — the S3 client instance. Private means only this class can use it. 
+  private s3Client: S3Client;
+
+  // Line 7: Private field — the bucket name from env
+  private bucketName: string;
+
+  // Lines 9–18: Constructor — runs when you do `new S3Service()`
+  constructor() {
+    // Lines 10–16: Create the S3Client with region and credentials
+    // S3Client is stateless — it just holds configuration
+    // It does NOT open a network connection here
+    this.s3Client = new S3Client({
+      region: ENV.AWS.REGION || "ap-south-1", // Which AWS region to talk to
+      credentials: {
+        accessKeyId: ENV.AWS.ACCESS_KEY_ID, // "Who am I"
+        secretAccessKey: ENV.AWS.SECRET_ACCESS_KEY, // "My secret"
+      },
+    });
+
+    // Line 17: Store the bucket name so every method can use it
+    this.bucketName = ENV.AWS.S3_BUCKET_NAME;
+  }
+
+  // Lines 21–41: Upload a file to S3
+  async uploadFile(key: string, fileBuffer: Buffer, fileType: string): Promise<UploadResponse> {
+    // key: WHERE in S3 to store it, e.g. "profiles/user123.jpg"
+    // fileBuffer: The actual binary content of the file
+    // fileType: MIME type like "image/jpeg" — tells S3 what kind of data it is
+
+    try {
+      // Lines 23–28: Create a PutObject command
+      // PutObjectCommand is an instruction: "Upload this data to this bucket at this key"
+      // It does NOT upload yet — it's just configuration
+      const command = new PutObjectCommand({
+        Bucket: this.bucketName, // Which bucket
+        Key: key, // "Filename" / path in S3
+        Body: fileBuffer, // The actual file data (bytes)
+        ContentType: fileType, // Tells browsers how to display/download the file
+      });
+
+      // Line 30: Actually SEND the command to AWS
+      // this.s3Client.send() is what makes the HTTP request to AWS
+      // It returns the AWS response (which has ETag etc.) 
+      const result = await this.s3Client.send(command);
+
+      // Lines 32–37: Build and return a clean response object
+      return {
+        success: true,
+        key,
+        // ETag: A quoted MD5 hash of the uploaded content, like `"a1b2c3d4..."`
+        // Used to verify the upload completed correctly
+        etag: result.ETag || "",
+        // Build the public URL for this file
+        location: this.getS3Url(key),
+      };
+    } catch (error) {
+      // Line 39: If upload fails, throw a descriptive error
+      // The `error instanceof Error` check ensures we can safely access .message
+      throw new Error(`Upload failed: ${error instanceof Error ? error.message : "Unknown error"}`);
+    }
+  }
+
+  // Lines 43–47: Build the public URL for any S3 key
+  private getS3Url(key: string): string {
+    const region = ENV.AWS.REGION;
+    // Standard S3 URL format: https://{bucket}.s3.{region}.amazonaws.com/{key}
+    return `https://${this.bucketName}.s3.${region}.amazonaws.com/${key}`;
+  }
+
+  // Lines 49–75: updateFile — used to REPLACE an existing file at the same key
+  // In S3, "update" = overwrite. Same key = same location = new data replaces old.
+  async updateFile(
+    key: string,
+    fileBuffer: Buffer,
+    fileType: string,
+    metadata?: Record<string, string>, // Optional extra metadata (e.g., { "originalName": "cat.jpg" })
+  ): Promise<UploadResponse> {
+    try {
+      const command = new PutObjectCommand({
+        Bucket: this.bucketName,
+        Key: key,
+        Body: fileBuffer,
+        ContentType: fileType,
+        Metadata: metadata, // Stored as S3 object metadata, NOT in the file content itself
+      });
+
+      const result = await this.s3Client.send(command);
+
+      return {
+        success: true,
+        key,
+        etag: result.ETag || "",
+        location: this.getS3Url(key),
+      };
+    } catch (error) {
+      throw new Error(`Update failed: ${error instanceof Error ? error.message : "Unknown error"}`);
+    }
+  }
+
+  // Lines 77–88: Delete a file from S3
+  async deleteFile(key: string): Promise<void> {
+    // key: The S3 key of the file to delete, e.g. 
"profiles/user123.jpg" + try { + const command = new DeleteObjectCommand({ + Bucket: this.bucketName, + Key: key, // The exact key to delete + // Note: S3 delete does NOT throw an error if the key doesn't exist. + // It silently succeeds. So you don't need to check if the file exists first. + }); + + await this.s3Client.send(command); + // No return value — void. If it didn't throw, it worked. + } catch (error) { + throw new Error(`Delete failed: ${error instanceof Error ? error.message : "Unknown error"}`); + } + } +} + +// Line 91: Export a SINGLETON instance — one shared S3Service object for the whole app +// This is a common pattern: create once, reuse everywhere. The S3Client is stateless so it's safe. +export default new S3Service(); +``` + +--- + +### `s3.upload.service.ts` — Presigned URLs & Advanced Operations + +```typescript +// Lines 1–7: Import specific commands and the client +import { + S3Client, + PutObjectCommand, // For generating presigned upload URLs + GetObjectCommand, // For generating presigned download URLs + downloading files + DeleteObjectCommand, // For deleting files directly + CopyObjectCommand, // For copying files within S3 (used in moveMediaToPermanent) +} from "@aws-sdk/client-s3"; + +// Line 8: getSignedUrl — the function that creates presigned URLs +// This is from a separate package: @aws-sdk/s3-request-presigner +import { getSignedUrl } from "@aws-sdk/s3-request-presigner"; + +import { ENV } from "../../config/env"; + +// Line 10: Node.js crypto module — used to generate random UUIDs +import crypto from "crypto"; + +// Line 11: Readable is a Node.js stream type +// S3's GetObject returns the file as a Node.js readable stream +import { Readable } from "stream"; + +// Lines 13–19: Create a MODULE-LEVEL s3Client +// Unlike s3.service.ts (which uses a class), this file uses a plain function pattern +// The client is created once when the module is first imported +const s3Client = new S3Client({ + region: ENV.AWS.REGION, + 
credentials: { + accessKeyId: ENV.AWS.ACCESS_KEY_ID, + secretAccessKey: ENV.AWS.SECRET_ACCESS_KEY, + }, +}); + +// ──────────────────────────────────────────────────────────── +// FUNCTION 1: getPresignedUploadUrl +// ──────────────────────────────────────────────────────────── +// +// Lines 21–44: Generate a presigned PUT URL +// This lets the BROWSER upload a file directly to S3 without going through your server +// +// Parameters: +// userId: The user's MongoDB ID (used to namespace files) +// filename: The original filename from the browser (e.g., "photo.jpg") +// mimeType: The file's MIME type (e.g., "image/jpeg") +// folder: Where in S3 to store it — defaults to "temp" + +export async function getPresignedUploadUrl( + userId: string, + filename: string, + mimeType: string, + folder: string = "temp", +): Promise<{ uploadUrl: string; s3Key: string; s3Url: string }> { + + // Line 27: Generate a RFC-4122 UUID (universally unique ID) + // Example: "550e8400-e29b-41d4-a716-446655440000" + // This ensures no two uploads ever collide, even simultaneously + const uuid = crypto.randomUUID(); + + // Line 28: Extract the file extension from the filename + // "photo.jpeg".split(".") → ["photo", "jpeg"] + // .pop() takes the last element → "jpeg" + // || "bin" is a fallback if no extension exists + const ext = filename.split(".").pop() || "bin"; + + // Lines 30–32: Build the S3 key (the file's "path" in S3) + // Special case: for "profiles" folder, use a fixed filename instead of UUID. + // Why? Because each user should only have ONE profile picture. + // If we used the UUID, every upload creates a new file. For profiles, we want + // the new upload to OVERWRITE the same key (same location = same URL in DB stays valid). + // + // For all other folders (temp, posts), use UUID to guarantee uniqueness. 
+  //
+  // Profile key example: "profiles/64abc123-1708000000000.webp"
+  // Post/temp key example: "temp/64abc123/550e8400-e29b-41d4-a716.jpg"
+  const s3Key =
+    folder === "profiles" ? `${folder}/${userId}-${Date.now()}.${ext}` : `${folder}/${userId}/${uuid}.${ext}`;
+
+  // Lines 34–38: Create a PutObjectCommand
+  // Note: We don't pass a Body here — this command is ONLY used to generate the signature
+  // The browser will supply the actual file bytes when it uses the presigned URL
+  const command = new PutObjectCommand({
+    Bucket: ENV.AWS.S3_BUCKET_NAME,
+    Key: s3Key,
+    ContentType: mimeType, // THIS IS IMPORTANT: the presigned URL will only accept this exact MIME type
+  });
+
+  // Line 40: Generate the presigned URL
+  // getSignedUrl(client, command, options) → returns a URL string
+  // expiresIn: 900 = 900 seconds = 15 minutes
+  // After 15 minutes, this URL becomes invalid. The browser must use it before then.
+  const uploadUrl = await getSignedUrl(s3Client, command, { expiresIn: 900 });
+
+  // Line 41: Build the permanent/public S3 URL for this key
+  // This is DIFFERENT from the uploadUrl (which has expiry params)
+  // This is the clean URL you save to MongoDB
+  const s3Url = `https://${ENV.AWS.S3_BUCKET_NAME}.s3.${ENV.AWS.REGION}.amazonaws.com/${s3Key}`;
+
+  // Line 43: Return all three values
+  // uploadUrl → the browser uses this to PUT the file
+  // s3Key → you save this to find/delete the file later
+  // s3Url → the permanent URL you save to the database
+  return { uploadUrl, s3Key, s3Url };
+}
+
+// ────────────────────────────────────────────────────────────
+// FUNCTION 2: moveMediaToPermanent
+// ────────────────────────────────────────────────────────────
+//
+// Lines 46–72: Move a file from temp/ to posts/ after the post is confirmed
+// This is a two-step operation: COPY to new location, then DELETE the original
+//
+// Parameters:
+//   tempKey: The current S3 key (in temp/)
+//   userId: User's ID
+//   postId: The confirmed Post ID (from MongoDB)
+
+export async 
function moveMediaToPermanent(
+  tempKey: string,
+  userId: string,
+  postId: string,
+): Promise<string> {
+
+  // Line 51: Extract just the filename from the full key
+  // "temp/64abc123/uuid.jpg".split("/") → ["temp", "64abc123", "uuid.jpg"]
+  // .pop() → "uuid.jpg"
+  const filename = tempKey.split("/").pop();
+
+  // Line 52: Build the new permanent key
+  // Example: "posts/64abc123/507f1f77bcf86cd799439011/uuid.jpg"
+  const newKey = `posts/${userId}/${postId}/${filename}`;
+
+  // Lines 54–61: COPY the object to the new key
+  // S3 CopyObject is a SERVER-SIDE operation — S3 copies the file internally
+  // Your server does NOT download and re-upload it. Very efficient.
+  await s3Client.send(
+    new CopyObjectCommand({
+      Bucket: ENV.AWS.S3_BUCKET_NAME, // Destination bucket
+      CopySource: `${ENV.AWS.S3_BUCKET_NAME}/${tempKey}`, // Source: "bucket/key"
+      Key: newKey, // Destination key
+    }),
+  );
+
+  // Lines 63–69: DELETE the original temp file
+  // We only delete AFTER the copy succeeds (sequential, not parallel)
+  // This prevents data loss if the copy fails halfway
+  await s3Client.send(
+    new DeleteObjectCommand({
+      Bucket: ENV.AWS.S3_BUCKET_NAME,
+      Key: tempKey,
+    }),
+  );
+
+  // Line 71: Return the new permanent URL
+  return `https://${ENV.AWS.S3_BUCKET_NAME}.s3.${ENV.AWS.REGION}.amazonaws.com/${newKey}`;
+}
+
+// ────────────────────────────────────────────────────────────
+// FUNCTION 3: getPresignedDownloadUrl
+// ────────────────────────────────────────────────────────────
+//
+// Lines 74–84: Generate a presigned GET URL
+// Used when the S3 file is PRIVATE (no public bucket policy)
+// This gives a temporary URL that allows the holder to download the file
+//
+// Parameters:
+//   s3Key: The file's key in S3
+//   expiresIn: How many seconds the URL is valid (default: 1 hour = 3600 seconds)
+
+export async function getPresignedDownloadUrl(
+  s3Key: string,
+  expiresIn: number = 3600,
+): Promise<string> {
+
+  // Create a GetObjectCommand — a "read" command
+  const command = 
new GetObjectCommand({
+    Bucket: ENV.AWS.S3_BUCKET_NAME,
+    Key: s3Key,
+  });
+
+  // Generate and return the presigned URL
+  // The URL will include your signature, expiry, etc.
+  // Anyone with this URL can download the file for the next `expiresIn` seconds
+  return await getSignedUrl(s3Client, command, { expiresIn });
+}
+
+// ────────────────────────────────────────────────────────────
+// FUNCTION 4: downloadMedia
+// ────────────────────────────────────────────────────────────
+//
+// Lines 86–100: Download a file from S3 and return it as a Buffer
+// Used when your BACKEND needs the file bytes (e.g., to send to another API like Instagram)
+// The posting workers use this to fetch media before uploading to social platforms
+
+export async function downloadMedia(s3Key: string): Promise<Buffer> {
+
+  // Send the GetObjectCommand — this downloads the file
+  const response = await s3Client.send(
+    new GetObjectCommand({
+      Bucket: ENV.AWS.S3_BUCKET_NAME,
+      Key: s3Key, // The file to download
+    }),
+  );
+
+  // Line 94: Cast Body to Readable
+  // response.Body is a Node.js Readable stream (data comes in chunks, not all at once)
+  // This is memory efficient — S3 doesn't dump 100MB into memory at once
+  const stream = response.Body as Readable;
+
+  // Line 95: Array to collect binary chunks
+  const chunks: Buffer[] = [];
+
+  // Lines 96–98: Read all chunks from the stream
+  // `for await...of` waits for each chunk from the stream
+  // Each `chunk` is a Buffer (raw bytes)
+  for await (const chunk of stream) {
+    chunks.push(chunk);
+  }
+
+  // Line 99: Concatenate all chunks into one Buffer
+  // Buffer.concat takes an array of Buffers and merges them into one
+  return Buffer.concat(chunks);
+}
+
+// ────────────────────────────────────────────────────────────
+// FUNCTION 5: extractS3Key
+// ────────────────────────────────────────────────────────────
+//
+// Lines 102–121: Parse an S3 URL and extract just the key part
+// This is used everywhere you need to delete or reference a 
file by key +// given only its URL (e.g., from the database) +// +// Example: +// Input: "https://hayon-app-images.s3.ap-south-1.amazonaws.com/profiles/user.jpg" +// Output: "profiles/user.jpg" + +export function extractS3Key(s3Url: string): string { + if (!s3Url) return ""; + + // Lines 105–107: Try regex match for standard S3 URL formats + // Pattern 1: .s3.{region}.amazonaws.com/{key} (path-style with region) + // Pattern 2: .s3.amazonaws.com/{key} (legacy path-style) + // The (.+)$ captures everything after the last "/" — that's our key + const s3DomainMatch = + s3Url.match(/\.s3[.-][^/]+\.amazonaws\.com\/(.+)$/) || + s3Url.match(/\.s3\.amazonaws\.com\/(.+)$/); + + if (s3DomainMatch && s3DomainMatch[1]) { + // Lines 110–111: Strip query string parameters + // Presigned URLs have parameters after "?", but the key is everything before "?" + return s3DomainMatch[1].split("?")[0]; + } + + // Lines 114–118: Fallback — simpler split if regex didn't match + const parts = s3Url.split(".amazonaws.com/"); + if (parts.length > 1) { + return parts[1].split("?")[0]; + } + + // Line 120: Last resort — return the URL as-is + // This prevents errors but might indicate a non-S3 URL was passed in + return s3Url; +} +``` + +--- + +## 9. 
Upload Flow — Frontend to S3 to Database + +### Flow Diagram + +``` +┌──────────────┐ ┌──────────────┐ ┌──────────────┐ +│ Browser │ │ Your Backend │ │ AWS S3 │ +└──────┬───────┘ └──────┬───────┘ └──────┬───────┘ + │ │ │ + │ POST /posts/media/upload │ + │ { contentType: "image/jpeg" } │ + │ ──────────────────────>│ │ + │ │ │ + │ │ getPresignedUploadUrl() + │ │ (signs URL with secret key) + │ │◄───────────────────────┤ + │ │ │ + │ { uploadUrl, s3Url, s3Key } │ + │ <──────────────────────│ │ + │ │ │ + │ PUT │ │ + │ body: │ │ + │ ────────────────────────────────────────────────> + │ │ │ + │ 200 OK (ETag: "abc") │ │ + │ <──────────────────────────────────────────────── + │ │ │ + │ POST /posts │ │ + │ { content: { mediaItems: [{ s3Url, s3Key }] } } + │ ──────────────────────>│ │ + │ │ │ + │ │ Save post to MongoDB │ + │ │ { mediaItems: [s3Url] } + │ │ │ + │ 200 OK { postId } │ │ + │ <──────────────────────│ │ +``` + +### Your `uploadFiles()` in `useCreatePost.ts` — Line by Line + +```typescript +// Lines 433–461 of useCreatePost.ts +const uploadFiles = async (files: File[]) => { + // Guard: if no files, return empty array immediately + if (files.length === 0) return []; + + // Create an array of upload promises — all files upload SIMULTANEOUSLY (parallel) + const uploadPromises = files.map(async (file) => { + // STEP 1: Ask your backend for a presigned URL + // Your backend calls getPresignedUploadUrl() and returns: + // uploadUrl: the temporary S3 URL with embedded signature + // s3Url: the permanent URL (what gets saved to DB) + // s3Key: the S3 key (for future delete/reference) + const { data } = await api.post("/posts/media/upload", { + contentType: file.type, // e.g., "image/jpeg" + }); + + const { uploadUrl, s3Url, s3Key } = data.data; + + // STEP 2: Upload the file DIRECTLY to S3 + // Note: This is a BROWSER fetch() call — not going through your backend + // method: "PUT" — S3 presigned PUT upload requires PUT (not POST) + // body: file — the raw File object 
(browser handles binary encoding) + // headers: Content-Type MUST match what you specified when generating the URL + // If they don't match, S3 returns 403 SignatureDoesNotMatch + await fetch(uploadUrl, { + method: "PUT", + body: file, + headers: { + "Content-Type": file.type, + }, + }); + + // STEP 3: Return the metadata — the actual bytes are now in S3 + return { + s3Url, // The permanent URL (stored in DB) + s3Key, // The S3 key (for future operations) + mimeType: file.type, + }; + }); + + // Wait for ALL uploads to complete (parallel execution) + return Promise.all(uploadPromises); +}; +``` + +--- + +## 10. Download Flow — How Images Are Viewed + +### For Public Files (Profile Avatars) + +Since your profile images are stored at `profiles/` and your bucket policy allows public reads: + +``` +Browser renders: + ↓ + Browser sends GET request to S3 + ↓ + S3 checks bucket policy → "profiles/* is public → allow" + ↓ + S3 returns the image bytes + ↓ + Browser displays it +``` + +The URL is stored directly in MongoDB (in the user's `avatar` field). No backend involved in serving the image. + +### For Private Files (Post Media, if protected) + +```typescript +// Call getPresignedDownloadUrl to get a temporary viewable URL +const viewUrl = await getPresignedDownloadUrl("posts/userId/postId/uuid.jpg", 3600); +// This URL is valid for 1 hour +// Pass it to the frontend, which puts it in an +``` + +### For Server-Side Downloads (Posting Workers) + +When your `posting.worker.ts` needs to upload media to Instagram/Facebook, it downloads the file: + +```typescript +const imageBuffer = await downloadMedia("posts/userId/postId/uuid.jpg"); +// imageBuffer is a Buffer with the raw image bytes +// Now pass it to the Instagram API: formData.append("file", imageBuffer) +``` + +--- + +## 11. 
Delete Flow — Cleaning Up Old Data + +### Avatar Deletion in `profile.controller.ts` — Line by Line + +```typescript +// Lines 155–184: deleteProfileController + +export async function deleteProfileController(req: Request, res: Response): Promise { + const userId = req?.auth?.id as string; + + // Line 160: Get the currently stored avatar URL from the JWT auth payload + // req.auth is populated by your auth middleware after verifying the JWT + // .avatar is the field you store the S3 URL in + const currentAvatar = req.auth?.avatar; + + // Line 161: SANITY CHECK — only delete S3 files, not external avatars + // A user might have a Google OAuth avatar (lh3.googleusercontent.com) + // or a DiceBear avatar. We only call S3 delete if the URL contains our bucket name. + if (currentAvatar && currentAvatar.includes(ENV.AWS.S3_BUCKET_NAME)) { + + // Line 163: Extract the S3 key from the full URL + // "https://hayon-app-images.s3.ap-south-1.amazonaws.com/profiles/user.jpg" + // split here ↑ + // .split(".amazonaws.com/") → ["https://hayon-app-images.s3.ap-south-1", "profiles/user.jpg"] + // [1] → "profiles/user.jpg" ← the key we need + const s3Key = currentAvatar.split(".amazonaws.com/")[1]; + + if (s3Key) { + // Line 165: Delete the file from S3 + // If this fails, the file stays in S3 (a "ghost" file), but we don't crash the controller + await s3Service.deleteFile(s3Key); + } + } + + // Lines 169–171: Generate a random DiceBear avatar URL + // This replaces the deleted profile picture with a unique generated avatar + const min = 10000000; + const max = 99999999; + const randomNum = Math.floor(Math.random() * (max - min + 1)) + min; + + // Line 174: Update database with the new DiceBear URL + await updateAvatar(userId, `https://api.dicebear.com/7.x/identicon/svg?seed=/${randomNum}`); + + new SuccessResponse("avatar deleted successfully").send(res); +} +``` + +### Update Avatar (Replace Existing) in `updateProfileController` + +```typescript +// Lines 122–135 of 
profile.controller.ts +// When user uploads a NEW profile picture, delete the OLD one from S3 + +const currentAvatar = req.auth?.avatar; // Old avatar URL + +// Three conditions before deleting old avatar: +// 1. There IS a current avatar +// 2. The new imageUrl is DIFFERENT from the old one (don't delete if same URL) +// 3. The current avatar is from our S3 bucket (not Google/DiceBear) +if ( + currentAvatar && + currentAvatar !== imageUrl && + currentAvatar.includes(ENV.AWS.S3_BUCKET_NAME) +) { + const s3Key = currentAvatar.split(".amazonaws.com/")[1]; + if (s3Key) { + // .catch() wraps the delete — if delete fails, we log the error but CONTINUE. + // This is intentional: saving the new avatar URL matters more than cleaning up the old one. + // The old file becomes a "ghost" — wasted storage, but the user experience is not broken. + await s3Service + .deleteFile(s3Key) + .catch((err) => logger.error(`Failed to delete old avatar: ${err.message}`)); + } +} +``` + +--- + +## 12. Where S3 Is Used in Hayon + +| Location | What it does | S3 Operation | +|----------|-------------|-------------| +| `getProfileUploadUrlController` | Generate presigned URL for avatar upload | `getPresignedUploadUrl` (PUT) | +| `updateProfileController` | Delete old avatar when user uploads new one | `s3Service.deleteFile()` | +| `deleteProfileController` | Delete avatar when user removes profile picture | `s3Service.deleteFile()` | +| `getUploadUrls` (post.controller) | Generate presigned URL for post media upload | `getPresignedUploadUrl` (PUT) | +| `createPost` (post.controller) | Saves s3Url in the post document | MongoDB save (URL only) | +| `downloadMedia` | Posting workers download media to upload to platforms | `GetObjectCommand` | +| `useCreatePost.ts` (frontend) | Browser uploads files directly to presigned URL | Browser `fetch PUT` | +| `loadDraft` (frontend) | Loads s3Urls from DB and uses them as `` src | Plain URL read | +| `next.config.ts` | Allows Next.js `` to load from S3 
domain | Image optimization config | +| Notification payload (post.controller L135) | Uses s3Url as notification thumbnail | URL reference | + +--- + +## 13. Next.js & Image Optimization + +When you use Next.js `` component, Next.js: +1. Intercepts the image request +2. Downloads the image from the remote source +3. Resizes/compresses/converts it to WebP +4. Serves the optimized version from its cache + +But Next.js only does this for **whitelisted domains**. That's why `next.config.ts` has: + +```typescript +// next.config.ts — Lines 16–22 +{ + protocol: "https", + hostname: "hayon-app-images.s3.amazonaws.com", + // Allows: https://hayon-app-images.s3.amazonaws.com/** +}, +{ + protocol: "https", + hostname: "hayon-app-images.s3.ap-south-1.amazonaws.com", + // Allows: https://hayon-app-images.s3.ap-south-1.amazonaws.com/** +}, +``` + +**You have two separate entries** because: +- `s3.amazonaws.com` is the legacy global endpoint (used in older AWS SDK versions) +- `s3.ap-south-1.amazonaws.com` is the regional endpoint (what your current code generates) + +Both are needed for safety, as different parts of your code or older saved URLs might use either format. + +> ⚠️ Without these entries, `` would throw: `Error: Invalid src prop: hostname not configured under images in next.config.ts` + +--- + +## 14. Security Best Practices + +### ✅ Things You're Doing Right + +1. **Presigned URLs for upload** — Browser never gets your AWS credentials +2. **Validating content-type before generating URLs** — Prevents users from uploading `.exe` or `.php` files +3. **Validating the imageUrl contains your bucket name** before saving it to DB — Prevents storing arbitrary external URLs as "profile pictures" +4. **Deleting old avatars from S3** before replacing — No orphaned files +5. 
**Using `required()` for env vars** — App crashes at startup if credentials are missing, not at runtime + +### IAM — Minimum Privilege Policy + +Instead of `AmazonS3FullAccess` for your IAM user, use a custom policy that restricts access to only your bucket and only the operations you need: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:CopyObject" + ], + "Resource": "arn:aws:s3:::hayon-app-images/*" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::hayon-app-images" + } + ] +} +``` + +This means if your AWS credentials are ever leaked, the attacker can ONLY access `hayon-app-images`, and cannot create new buckets, access other services (EC2, RDS, etc.), or modify bucket settings. + +--- + +## 15. Code Review — Mistakes & Improvements + +### 🔴 Bug #1: `moveMediaToPermanent` is Never Called + +**Location**: `s3.upload.service.ts` — Lines 46–72 +**Problem**: The function `moveMediaToPermanent` was written to move files from `temp/` → `posts/` after post creation. But in `post.controller.ts`, after creating the post, this function is **never called**. The `s3Url` from the temp folder (e.g., `temp/userId/uuid.jpg`) is saved directly to MongoDB. + +```typescript +// In post.controller.ts createPost() — what you DO: +const post = await postRepository.createPost(postData); +// postData.content.mediaItems[].s3Url still points to temp/ ← never moved + +// What you SHOULD do: +const post = await postRepository.createPost(postData); +// Then move each temp file to permanent location +const movedMediaItems = await Promise.all( + content.mediaItems.map(async (item) => { + const permanentUrl = await moveMediaToPermanent( + item.s3Key!, + userId, + post._id.toString() + ); + return { ...item, s3Url: permanentUrl }; + }) +); +// Then update the post with permanent URLs +``` + +**Impact**: Files sit in `temp/` forever. 
If you add an S3 lifecycle rule to delete `temp/` objects after 7 days, post media gets deleted. + +--- + +### 🔴 Bug #2: `deleteMedia` Controller is Not Implemented + +**Location**: `post.controller.ts` — Lines 384–395 +**Problem**: The `deleteMedia` controller simply returns `501 Not Implemented`. This means when a post is deleted, the associated S3 media files are never cleaned up. + +```typescript +// Current implementation: +export const deleteMedia = async (req: Request, res: Response) => { + // ... + return new ErrorResponse("Not implemented", { status: 501 }).send(res); +}; + +// Fix: In deletePost(), after deleting from MongoDB, also delete S3 files: +export const deletePost = async (req: Request, res: Response) => { + const deletedPost = await postRepository.deletePost(postId, userId); + + // Clean up S3 media + if (deletedPost?.content?.mediaItems?.length) { + for (const item of deletedPost.content.mediaItems) { + if (item.s3Key) { + await s3Service.deleteFile(item.s3Key).catch((err) => + logger.error(`Failed to delete S3 media: ${err.message}`) + ); + } + } + } +}; +``` + +--- + +### 🟡 Issue #3: Duplicate S3 Client Initialization + +**Location**: Both `s3.service.ts` and `s3.upload.service.ts` +**Problem**: You create two separate `S3Client` instances: one in the `S3Service` class, and one at module level in `s3.upload.service.ts`. These are configured identically. While S3Client is stateless (so creating two isn't catastrophically bad), it's wasteful and inconsistent. 
+ +**Fix**: Create one shared `s3Client` instance in a dedicated file, then import it both places: + +```typescript +// backend/src/services/s3/s3.client.ts (new file) +import { S3Client } from "@aws-sdk/client-s3"; +import { ENV } from "../../config/env"; + +export const s3Client = new S3Client({ + region: ENV.AWS.REGION, + credentials: { + accessKeyId: ENV.AWS.ACCESS_KEY_ID, + secretAccessKey: ENV.AWS.SECRET_ACCESS_KEY, + }, +}); + +// Then in both s3.service.ts and s3.upload.service.ts: +import { s3Client } from "./s3.client"; +``` + +--- + +### 🟡 Issue #4: `extractS3Key` vs Manual `.split(".amazonaws.com/")[1]` + +**Location**: `profile.controller.ts` — Lines 129, 163 +**Problem**: You have a perfectly good `extractS3Key()` function in `s3.upload.service.ts`, but `profile.controller.ts` manually splits the URL using `.split(".amazonaws.com/")[1]` instead. + +```typescript +// Current (fragile): +const s3Key = currentAvatar.split(".amazonaws.com/")[1]; + +// Better (use your existing utility): +import { extractS3Key } from "../services/s3/s3.upload.service"; +const s3Key = extractS3Key(currentAvatar); +``` + +The manual split doesn't handle edge cases like presigned URLs with query parameters, which `extractS3Key` does handle. + +--- + +### 🟡 Issue #5: Typo in Response Message + +**Location**: `profile.controller.ts` — Line 176 +```typescript +// Current (typo): +new SuccessResponse("avatart deleted successfully").send(res); +// ↑ extra 't' + +// Fix: +new SuccessResponse("avatar deleted successfully").send(res); +``` + +--- + +### 🟡 Issue #6: Debug `console.log` Left in Production Code + +**Location**: `profile.controller.ts` — Line 173 +```typescript +console.log(req.auth); // ← This logs sensitive auth data to production logs! 
+ +// Remove this line entirely, or if you need to debug, use: +logger.debug("Auth data:", req.auth); +// And ensure debug logs are disabled in production via LOG_LEVEL env var +``` + +--- + +### 🟡 Issue #7: `getProfileUploadUrlController` — Post Usage is Broken + +**Location**: `profile.controller.ts` — Lines 87–90 +```typescript +if (usage === "post") { + folder = "temp"; + filename = `image.${ext}`; // ← BUG: Every post image has the exact same key! +} +``` + +If `usage === "post"`, the key would be `temp/image.jpg` for every user — they'd overwrite each other's uploads. However, this code path doesn't appear to be used in the app (posts use `getUploadUrls` in `post.controller.ts` instead), but it's still a latent bug. + +--- + +### 🟢 Improvement: Add S3 Lifecycle Rules + +In the AWS S3 console, go to **Management → Lifecycle Rules → Create Rule**: + +- **Rule 1**: Delete objects in `temp/` after 2 days + - Filter: Prefix = `temp/` + - Action: Expire after 2 days + +This automatically cleans up files uploaded but never confirmed (abandoned uploads), preventing storage cost buildup. + +--- + +### 🟢 Improvement: Use CloudFront for CDN + +Instead of serving images directly from S3: +``` +https://hayon-app-images.s3.ap-south-1.amazonaws.com/profiles/user.jpg +``` + +Set up a CloudFront distribution in front of your S3 bucket: +``` +https://d1234abcd.cloudfront.net/profiles/user.jpg +``` + +Benefits: +- Images served from **edge locations worldwide** (much faster for users outside India) +- CloudFront caches images — reduced S3 GET requests → lower costs +- You can use a custom domain: `https://assets.hayon.app/profiles/user.jpg` +- Better security: keep bucket completely private, only CloudFront can read it + +--- + +## Summary — The Complete S3 Journey in Hayon + +``` +User selects photo on frontend + │ + ▼ +useCreatePost.ts: uploadFiles() + 1. POST /posts/media/upload (contentType) + │ + ▼ +post.controller.ts: getUploadUrls() + 2. 
Validates mimeType is in allowedTypes + 3. Calls getPresignedUploadUrl(userId, filename, contentType, "posts") + │ + ▼ +s3.upload.service.ts: getPresignedUploadUrl() + 4. Generates UUID for uniqueness + 5. Builds s3Key: "posts/{userId}/{uuid}.{ext}" + 6. Creates PutObjectCommand (no Body yet) + 7. Signs it with AWS SDK → uploadUrl (valid 15 min) + 8. Builds permanent s3Url + 9. Returns { uploadUrl, s3Key, s3Url } + │ + ▼ +Back in frontend: useCreatePost.ts + 10. Browser fetch(uploadUrl, { method: "PUT", body: file }) + 11. File bytes travel directly from browser → AWS S3 + 12. S3 verifies signature, stores the file + │ + ▼ +Frontend collects { s3Url, s3Key, mimeType } per file + │ + ▼ +User clicks "Post Now" + 13. POST /posts with payload including mediaItems: [{ s3Url, s3Key, mimeType }] + │ + ▼ +post.controller.ts: createPost() + 14. Validates payload with Zod schema + 15. Creates post document in MongoDB with s3Url + 16. Queues posting jobs to RabbitMQ + │ + ▼ +posting.worker.ts picks up the job + 17. Reads mediaUrls (s3Url) from the job + 18. Calls downloadMedia(s3Key) to get file as Buffer + 19. Uploads buffer to Instagram/Facebook/etc. 
API + │ + ▼ +Platform posts the image +User's profile shows the post with the S3 image URL +``` + +--- + +*Last updated: 2026-02-21 | Project: Hayon* diff --git a/frontend/Dockerfile b/frontend/Dockerfile new file mode 100644 index 0000000..3fbb7cd --- /dev/null +++ b/frontend/Dockerfile @@ -0,0 +1,74 @@ +# ============================================================ +# Stage 1: Builder +# ============================================================ +FROM node:20-alpine AS builder + +RUN corepack enable && corepack prepare pnpm@9 --activate + +WORKDIR /app + +COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ + +COPY schemas/ ./schemas/ + +COPY frontend/package.json ./frontend/ +COPY frontend/tsconfig.json ./frontend/ +COPY frontend/next.config.ts ./frontend/ +COPY frontend/postcss.config.mjs ./frontend/ +COPY frontend/components.json ./frontend/ +COPY frontend/public/ ./frontend/public/ +COPY frontend/src/ ./frontend/src/ + +RUN pnpm install --frozen-lockfile + +RUN cd schemas && pnpm run build + +ARG NEXT_PUBLIC_API_URL +ARG NEXT_PUBLIC_VAPID_KEY +ARG NEXT_PUBLIC_FIREBASE_API_KEY +ARG NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN +ARG NEXT_PUBLIC_FIREBASE_PROJECT_ID +ARG NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET +ARG NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID +ARG NEXT_PUBLIC_FIREBASE_APP_ID +ARG NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID + +ENV NEXT_PUBLIC_API_URL=$NEXT_PUBLIC_API_URL +ENV NEXT_PUBLIC_VAPID_KEY=$NEXT_PUBLIC_VAPID_KEY +ENV NEXT_PUBLIC_FIREBASE_API_KEY=$NEXT_PUBLIC_FIREBASE_API_KEY +ENV NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN=$NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN +ENV NEXT_PUBLIC_FIREBASE_PROJECT_ID=$NEXT_PUBLIC_FIREBASE_PROJECT_ID +ENV NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET=$NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET +ENV NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID=$NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID +ENV NEXT_PUBLIC_FIREBASE_APP_ID=$NEXT_PUBLIC_FIREBASE_APP_ID +ENV NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID=$NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID + + +RUN sed -i 
"s|NEXT_PUBLIC_FIREBASE_API_KEY_PLACEHOLDER|${NEXT_PUBLIC_FIREBASE_API_KEY}|g" /app/frontend/public/firebase-messaging-sw.js && \ + sed -i "s|NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN_PLACEHOLDER|${NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN}|g" /app/frontend/public/firebase-messaging-sw.js && \ + sed -i "s|NEXT_PUBLIC_FIREBASE_PROJECT_ID_PLACEHOLDER|${NEXT_PUBLIC_FIREBASE_PROJECT_ID}|g" /app/frontend/public/firebase-messaging-sw.js && \ + sed -i "s|NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET_PLACEHOLDER|${NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET}|g" /app/frontend/public/firebase-messaging-sw.js && \ + sed -i "s|NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID_PLACEHOLDER|${NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID}|g" /app/frontend/public/firebase-messaging-sw.js && \ + sed -i "s|NEXT_PUBLIC_FIREBASE_APP_ID_PLACEHOLDER|${NEXT_PUBLIC_FIREBASE_APP_ID}|g" /app/frontend/public/firebase-messaging-sw.js && \ + sed -i "s|NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID_PLACEHOLDER|${NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID}|g" /app/frontend/public/firebase-messaging-sw.js + +RUN cd frontend && pnpm run build + +# ============================================================ +# Stage 2: Production Runtime (standalone) +# ============================================================ +FROM node:20-alpine AS runner + +WORKDIR /app + +ENV NODE_ENV=production +ENV PORT=3000 +ENV HOSTNAME=0.0.0.0 + +COPY --from=builder /app/frontend/.next/standalone ./ +COPY --from=builder /app/frontend/.next/static ./frontend/.next/static +COPY --from=builder /app/frontend/public ./frontend/public + +EXPOSE 3000 + +CMD ["node", "frontend/server.js"] diff --git a/nginx/nginx.conf b/nginx/nginx.conf new file mode 100644 index 0000000..769e3a8 --- /dev/null +++ b/nginx/nginx.conf @@ -0,0 +1,151 @@ +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + 
'"$http_user_agent"'; + access_log /var/log/nginx/access.log main; + error_log /var/log/nginx/error.log warn; + + sendfile on; + keepalive_timeout 65; + gzip on; + gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; + + # -------------------------------------------------------- + # Upstream definitions + # -------------------------------------------------------- + upstream frontend { + server frontend:3000; + } + + upstream backend { + server backend:5000; + } + + # -------------------------------------------------------- + # HTTP → HTTPS redirect (handles ALL domains) + # -------------------------------------------------------- + server { + listen 80; + server_name hayon.site www.hayon.site api.hayon.site; + + # Certbot ACME challenge — needed for SSL cert issuance/renewal + location /.well-known/acme-challenge/ { + root /var/www/certbot; + } + + location / { + return 301 https://$host$request_uri; + } + } + + # -------------------------------------------------------- + # www.hayon.site → redirect to non-www + # -------------------------------------------------------- + server { + listen 443 ssl; + server_name www.hayon.site; + + ssl_certificate /etc/letsencrypt/live/hayon.site/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/hayon.site/privkey.pem; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers HIGH:!aNULL:!MD5; + + return 301 https://hayon.site$request_uri; + } + + # -------------------------------------------------------- + # hayon.site — Frontend + # -------------------------------------------------------- + server { + listen 443 ssl; + server_name hayon.site; + + ssl_certificate /etc/letsencrypt/live/hayon.site/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/hayon.site/privkey.pem; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + add_header 
Strict-Transport-Security "max-age=31536000; includeSubDomains" always; + add_header X-Frame-Options SAMEORIGIN; + add_header X-Content-Type-Options nosniff; + + client_max_body_size 50m; + + location / { + proxy_pass http://frontend; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + } + } + + # -------------------------------------------------------- + # api.hayon.site — Backend API + WebSocket + # -------------------------------------------------------- + server { + listen 443 ssl; + server_name api.hayon.site; + + ssl_certificate /etc/letsencrypt/live/api.hayon.site/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/api.hayon.site/privkey.pem; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + ssl_session_cache shared:SSL_api:10m; + ssl_session_timeout 10m; + + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; + add_header X-Frame-Options SAMEORIGIN; + add_header X-Content-Type-Options nosniff; + + client_max_body_size 50m; + + # All API routes + location /api/ { + proxy_pass http://backend; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + proxy_read_timeout 300s; + } + + # WebSocket (Socket.io) + location /socket.io/ { + proxy_pass http://backend; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP 
$remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 86400s; + } + + # Health check + location /health { + proxy_pass http://backend; + proxy_set_header Host $host; + } + } +} diff --git a/rabbitmq/Dockerfile b/rabbitmq/Dockerfile new file mode 100644 index 0000000..f6ebd8c --- /dev/null +++ b/rabbitmq/Dockerfile @@ -0,0 +1,7 @@ +FROM rabbitmq:3.13-management-alpine + +RUN apk add --no-cache curl && \ + curl -L https://github.com/rabbitmq/rabbitmq-delayed-message-exchange/releases/download/v3.13.0/rabbitmq_delayed_message_exchange-3.13.0.ez \ + -o /opt/rabbitmq/plugins/rabbitmq_delayed_message_exchange-3.13.0.ez + +COPY enabled_plugins /etc/rabbitmq/enabled_plugins \ No newline at end of file diff --git a/rabbitmq/enabled_plugins b/rabbitmq/enabled_plugins new file mode 100644 index 0000000..31d6df6 --- /dev/null +++ b/rabbitmq/enabled_plugins @@ -0,0 +1 @@ +[rabbitmq_management,rabbitmq_delayed_message_exchange]. From 88de4d791a6202f780aaa37b05890326b938e058 Mon Sep 17 00:00:00 2001 From: hafzism Date: Sun, 3 May 2026 02:55:40 +0530 Subject: [PATCH 3/3] refactor: transition to container-based CI/CD pipeline with GHCR image builds --- .github/workflows/cd.yml | 183 +++++++++++++++++++++++++++------------ docker-compose.prod.yml | 182 ++++++++++++++++++++++++++++++++++++++ docker-compose.yml | 48 +++++++--- scripts/certbot-init.sh | 121 ++++++++++++++++++++++++++ scripts/ec2-setup.sh | 100 +++++++++++++++++++++ 5 files changed, 569 insertions(+), 65 deletions(-) create mode 100644 docker-compose.prod.yml create mode 100644 scripts/certbot-init.sh create mode 100644 scripts/ec2-setup.sh diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index fc51af8..e2ada63 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -1,21 +1,112 @@ -name: Deploy to EC2 (Docker) +name: Build & Deploy on: push: branches: - main +env: + REGISTRY: ghcr.io + # e.g. 
ghcr.io/devxtra-community/hayon + IMAGE_BASE: ghcr.io/${{ github.repository_owner }}/hayon + jobs: + # ============================================================ + # Job 1: Build images on GitHub Actions (7 GB RAM, fast) + # ============================================================ + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write # needed to push to GHCR + + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + # ---- Login to GitHub Container Registry ---- + - name: Log in to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} # auto-provided, no setup needed + + # ---- Docker Buildx (for better caching) ---- + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # ---- Write Firebase service account (needed in backend image) ---- + - name: Write Firebase service account + run: | + echo '${{ secrets.FIREBASE_SERVICE_ACCOUNT_JSON }}' \ + > backend/src/serviceAccountKey.json + + # ---- Build & push backend image ---- + - name: Build and push backend + uses: docker/build-push-action@v5 + with: + context: . + file: backend/Dockerfile + push: true + tags: ${{ env.IMAGE_BASE }}-backend:latest + # Cache layers between runs — speeds up subsequent builds significantly + cache-from: type=gha + cache-to: type=gha,mode=max + + # ---- Build & push worker image (same Dockerfile as backend) ---- + # We tag it separately so compose can reference it cleanly + - name: Tag worker image (reuses backend) + run: | + docker pull ${{ env.IMAGE_BASE }}-backend:latest + docker tag ${{ env.IMAGE_BASE }}-backend:latest ${{ env.IMAGE_BASE }}-worker:latest + docker push ${{ env.IMAGE_BASE }}-worker:latest + + # ---- Build & push frontend image ---- + # NEXT_PUBLIC_ vars are baked in at build time, so they're passed here + - name: Build and push frontend + uses: docker/build-push-action@v5 + with: + context: . 
+ file: frontend/Dockerfile + push: true + tags: ${{ env.IMAGE_BASE }}-frontend:latest + # No cache for frontend — NEXT_PUBLIC_ env vars are baked in. + # A cached image would have stale values. + build-args: | + NEXT_PUBLIC_API_URL=${{ secrets.BACKEND_URL }} + NEXT_PUBLIC_VAPID_KEY=${{ secrets.NEXT_PUBLIC_VAPID_KEY }} + NEXT_PUBLIC_FIREBASE_API_KEY=${{ secrets.NEXT_PUBLIC_FIREBASE_API_KEY }} + NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN=${{ secrets.NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN }} + NEXT_PUBLIC_FIREBASE_PROJECT_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_PROJECT_ID }} + NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET=${{ secrets.NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET }} + NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID }} + NEXT_PUBLIC_FIREBASE_APP_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_APP_ID }} + NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID }} + + # ---- Build & push custom RabbitMQ image ---- + - name: Build and push rabbitmq + uses: docker/build-push-action@v5 + with: + context: ./rabbitmq + file: rabbitmq/Dockerfile + push: true + tags: ${{ env.IMAGE_BASE }}-rabbitmq:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + # ============================================================ + # Job 2: Deploy on EC2 — just pull images and restart + # Runs only after build job succeeds + # ============================================================ deploy: runs-on: ubuntu-latest + needs: build # wait for build job to finish steps: - name: Checkout repo uses: actions/checkout@v4 - # -------------------------------------------------------- - # SSH Setup - # -------------------------------------------------------- - name: Setup SSH run: | mkdir -p ~/.ssh @@ -23,31 +114,24 @@ jobs: chmod 600 ~/.ssh/ec2.pem ssh-keyscan -H ${{ secrets.EC2_HOST }} >> ~/.ssh/known_hosts - # -------------------------------------------------------- - # Sync project files to EC2 - # Excludes: node_modules, .next, dist, .git, local env 
files - # -------------------------------------------------------- - - name: Sync project to EC2 + # ---- Sync config files (compose, nginx, rabbitmq config) ---- + # We only need non-built files. Source code is NOT needed on EC2. + - name: Sync config files to EC2 run: | - rsync -avz --delete \ - --exclude='node_modules' \ - --exclude='.next' \ - --exclude='dist' \ - --exclude='.git' \ - --exclude='backend/.env' \ - --exclude='frontend/.env.local' \ - --exclude='certbot' \ + rsync -avz \ -e "ssh -i ~/.ssh/ec2.pem" \ - ./ \ + docker-compose.prod.yml \ + nginx/ \ + rabbitmq/enabled_plugins \ + scripts/ \ ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }}:${{ secrets.APP_DIR }}/ - # -------------------------------------------------------- - # Inject secrets on the remote server - # -------------------------------------------------------- + # ---- Write backend .env ---- - name: Write backend .env on EC2 run: | - ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'ENDSSH' - cat > ${{ secrets.APP_DIR }}/backend/.env << 'EOF' + ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} bash << 'ENDSSH' + mkdir -p ${{ secrets.APP_DIR }}/backend + cat > ${{ secrets.APP_DIR }}/backend/.env << 'EOF' NODE_ENV=production PORT=5000 FRONTEND_URL=${{ secrets.FRONTEND_URL }} @@ -89,47 +173,36 @@ jobs: EOF ENDSSH - - name: Write Firebase service account on EC2 - run: | - ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} \ - "echo '${{ secrets.FIREBASE_SERVICE_ACCOUNT_JSON }}' > ${{ secrets.APP_DIR }}/backend/src/serviceAccountKey.json" - - - name: Write root .env (for docker-compose build args) on EC2 + # ---- Write root .env (registry + rabbitmq creds for compose) ---- + - name: Write root .env on EC2 run: | - ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'ENDSSH' - cat > ${{ secrets.APP_DIR }}/.env << 'EOF' - NEXT_PUBLIC_API_URL=${{ secrets.BACKEND_URL }} - NEXT_PUBLIC_VAPID_KEY=${{ 
secrets.NEXT_PUBLIC_VAPID_KEY }} - NEXT_PUBLIC_FIREBASE_API_KEY=${{ secrets.NEXT_PUBLIC_FIREBASE_API_KEY }} - NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN=${{ secrets.NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN }} - NEXT_PUBLIC_FIREBASE_PROJECT_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_PROJECT_ID }} - NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET=${{ secrets.NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET }} - NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID }} - NEXT_PUBLIC_FIREBASE_APP_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_APP_ID }} - NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID }} + ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} bash << 'ENDSSH' + cat > ${{ secrets.APP_DIR }}/.env << 'EOF' RABBITMQ_USER=${{ secrets.RABBITMQ_USER }} RABBITMQ_PASS=${{ secrets.RABBITMQ_PASS }} + IMAGE_BASE=ghcr.io/${{ github.repository_owner }}/hayon EOF ENDSSH - # -------------------------------------------------------- - # Build & restart containers on EC2 - # -------------------------------------------------------- - - name: Deploy with Docker Compose + # ---- Pull pre-built images and restart ---- + - name: Pull images and restart containers run: | - ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} << 'ENDSSH' - set -e - cd ${{ secrets.APP_DIR }} + ssh -i ~/.ssh/ec2.pem ${{ secrets.EC2_USER }}@${{ secrets.EC2_HOST }} bash << 'ENDSSH' + set -e + cd ${{ secrets.APP_DIR }} + + # Login to GHCR on EC2 (uses a personal access token) + echo "${{ secrets.GHCR_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - # Pull latest images (redis, rabbitmq, nginx) - docker compose pull redis rabbitmq nginx certbot + # Pull the freshly built images + docker compose -f docker-compose.prod.yml pull - # Build app images (frontend + backend/worker) - docker compose build --no-cache frontend backend + # Restart with new images + docker compose -f docker-compose.prod.yml up -d --remove-orphans - # Restart 
everything with zero manual intervention - docker compose up -d --remove-orphans + # Clean up old images + docker image prune -f - # Remove dangling images to save disk space - docker image prune -f + echo "=== Deploy complete ===" + docker compose -f docker-compose.prod.yml ps ENDSSH diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml new file mode 100644 index 0000000..ede9a94 --- /dev/null +++ b/docker-compose.prod.yml @@ -0,0 +1,182 @@ +# Production compose file — used on EC2 +# Uses pre-built images from GHCR (GitHub Container Registry) +# Images are built in GitHub Actions CI/CD, not here. +# +# Usage: docker compose -f docker-compose.prod.yml up -d + +name: hayon + +services: + # ============================================================ + # Nginx — reverse proxy + TLS termination + # ============================================================ + nginx: + image: nginx:1.27-alpine + container_name: hayon_nginx + ports: + - "80:80" + - "443:443" + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro + - ./certbot/conf:/etc/letsencrypt:ro + - ./certbot/www:/var/www/certbot:ro + depends_on: + - frontend + - backend + restart: unless-stopped + networks: + - hayon_net + deploy: + resources: + limits: + memory: 64M + + # ============================================================ + # Certbot — SSL renewal daemon + # ============================================================ + certbot: + image: certbot/certbot:latest + container_name: hayon_certbot + volumes: + - ./certbot/conf:/etc/letsencrypt + - ./certbot/www:/var/www/certbot + entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'" + networks: + - hayon_net + + # ============================================================ + # Frontend — pre-built image from GHCR + # ============================================================ + frontend: + image: ${IMAGE_BASE}-frontend:latest + container_name: hayon_frontend + expose: + - "3000" + environment: + - 
NODE_ENV=production + - PORT=3000 + - HOSTNAME=0.0.0.0 + restart: unless-stopped + networks: + - hayon_net + deploy: + resources: + limits: + memory: 256M + + # ============================================================ + # Backend — pre-built image from GHCR + # ============================================================ + backend: + image: ${IMAGE_BASE}-backend:latest + container_name: hayon_backend + expose: + - "5000" + env_file: + - backend/.env + environment: + - NODE_ENV=production + - RABBITMQ_URL=amqp://${RABBITMQ_USER:-hayon}:${RABBITMQ_PASS:-hayon_secret}@rabbitmq:5672 + - REDIS_HOST=redis + - REDIS_PORT=6379 + depends_on: + rabbitmq: + condition: service_healthy + redis: + condition: service_healthy + restart: unless-stopped + networks: + - hayon_net + deploy: + resources: + limits: + memory: 256M + + # ============================================================ + # Worker — same backend image, different command + # ============================================================ + worker: + image: ${IMAGE_BASE}-worker:latest + container_name: hayon_worker + command: ["node", "dist/workers/index.js"] + env_file: + - backend/.env + environment: + - NODE_ENV=production + - RABBITMQ_URL=amqp://${RABBITMQ_USER:-hayon}:${RABBITMQ_PASS:-hayon_secret}@rabbitmq:5672 + - REDIS_HOST=redis + - REDIS_PORT=6379 + depends_on: + rabbitmq: + condition: service_healthy + redis: + condition: service_healthy + restart: unless-stopped + networks: + - hayon_net + deploy: + resources: + limits: + memory: 200M + + # ============================================================ + # Redis + # ============================================================ + redis: + image: redis:7-alpine + container_name: hayon_redis + expose: + - "6379" + volumes: + - redis_data:/data + command: redis-server --appendonly yes --maxmemory 100mb --maxmemory-policy allkeys-lru + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + 
networks: + - hayon_net + deploy: + resources: + limits: + memory: 128M + + # ============================================================ + # RabbitMQ — pre-built image from GHCR (has delayed plugin) + # ============================================================ + rabbitmq: + image: ${IMAGE_BASE}-rabbitmq:latest + container_name: hayon_rabbitmq + expose: + - "5672" + ports: + - "127.0.0.1:15672:15672" + volumes: + - rabbitmq_data:/var/lib/rabbitmq + environment: + RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER:-hayon} + RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASS:-hayon_secret} + RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit vm_memory_high_watermark 0.4" + healthcheck: + test: ["CMD", "rabbitmq-diagnostics", "ping"] + interval: 15s + timeout: 10s + retries: 10 + start_period: 30s + restart: unless-stopped + networks: + - hayon_net + deploy: + resources: + limits: + memory: 256M + +volumes: + redis_data: + rabbitmq_data: + +networks: + hayon_net: + driver: bridge diff --git a/docker-compose.yml b/docker-compose.yml index c52df5d..3a82549 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ name: hayon services: # ============================================================ - # Nginx + # Nginx — reverse proxy + TLS termination # ============================================================ nginx: image: nginx:1.27-alpine @@ -20,9 +20,13 @@ services: restart: unless-stopped networks: - hayon_net + deploy: + resources: + limits: + memory: 64M # ============================================================ - # Certbot + # Certbot — SSL renewal daemon # ============================================================ certbot: image: certbot/certbot:latest @@ -30,13 +34,12 @@ services: volumes: - ./certbot/conf:/etc/letsencrypt - ./certbot/www:/var/www/certbot - # Run manually: docker compose run --rm certbot certonly ... 
entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'" networks: - hayon_net # ============================================================ - # Frontend + # Frontend — Next.js standalone # ============================================================ frontend: build: @@ -62,9 +65,13 @@ services: restart: unless-stopped networks: - hayon_net + deploy: + resources: + limits: + memory: 256M # ============================================================ - # Backend + # Backend — Express API # ============================================================ backend: build: @@ -88,9 +95,13 @@ services: restart: unless-stopped networks: - hayon_net + deploy: + resources: + limits: + memory: 256M # ============================================================ - # Worker + # Worker — RabbitMQ consumer (same image, different CMD) # ============================================================ worker: build: @@ -113,9 +124,13 @@ services: restart: unless-stopped networks: - hayon_net + deploy: + resources: + limits: + memory: 200M # ============================================================ - # Redis + # Redis — cache # ============================================================ redis: image: redis:7-alpine @@ -124,7 +139,9 @@ services: - "6379" volumes: - redis_data:/data - command: redis-server --appendonly yes + # maxmemory 100mb: prevents Redis from eating all RAM on t2.micro + # allkeys-lru: evict least-recently-used keys when memory is full + command: redis-server --appendonly yes --maxmemory 100mb --maxmemory-policy allkeys-lru healthcheck: test: ["CMD", "redis-cli", "ping"] interval: 10s @@ -133,9 +150,13 @@ services: restart: unless-stopped networks: - hayon_net + deploy: + resources: + limits: + memory: 128M # ============================================================ - # RabbitMQ + # RabbitMQ — message broker (with delayed message plugin) # ============================================================ rabbitmq: build: @@ 
-145,12 +166,15 @@ services: expose: - "5672" ports: - - "127.0.0.1:15672:15672" + # Management UI — only accessible locally via SSH tunnel + - "127.0.0.1:15672:15672" volumes: - rabbitmq_data:/var/lib/rabbitmq environment: RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER:-hayon} RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASS:-hayon_secret} + # Limit memory usage — important for t2.micro + RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit vm_memory_high_watermark 0.4" healthcheck: test: ["CMD", "rabbitmq-diagnostics", "ping"] interval: 15s @@ -160,6 +184,10 @@ services: restart: unless-stopped networks: - hayon_net + deploy: + resources: + limits: + memory: 256M volumes: redis_data: diff --git a/scripts/certbot-init.sh b/scripts/certbot-init.sh new file mode 100644 index 0000000..84fad92 --- /dev/null +++ b/scripts/certbot-init.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# ============================================================= +# Certbot Bootstrap — Run this ONCE on the EC2 after DNS is live +# +# Pre-requisites: +# 1. DNS is pointing to this EC2 (nslookup hayon.site returns EC2 IP) +# 2. Docker is installed (ran ec2-setup.sh) +# 3. 
Directories exist: /home/ubuntu/hayon/certbot/{conf,www} and nginx/ +# +# What it does: +# Phase 1 → Start a throwaway nginx on port 80 (no compose, no images needed) +# Phase 2 → Run certbot to get certs for hayon.site + api.hayon.site +# Phase 3 → Stop throwaway nginx (real app starts via CD pipeline) +# ============================================================= + +set -e + +APP_DIR="/home/ubuntu/hayon" +EMAIL="hayon.app@gmail.com" # ← used for cert expiry alerts from Let's Encrypt + +echo "==========================================" +echo " Certbot SSL Bootstrap" +echo "==========================================" + +# Verify directories exist +mkdir -p $APP_DIR/certbot/conf +mkdir -p $APP_DIR/certbot/www +mkdir -p $APP_DIR/nginx + +# ---------------------------------------------------------- +# Phase 1: Start a temporary standalone nginx on port 80 +# Uses docker run directly — no compose file needed yet +# ---------------------------------------------------------- +echo "" +echo "[Phase 1] Starting temporary HTTP nginx..." 
+ +# Write a minimal HTTP-only nginx config +cat > /tmp/nginx-bootstrap.conf << 'EOF' +events { worker_connections 1024; } +http { + server { + listen 80; + server_name hayon.site www.hayon.site api.hayon.site; + + # Let's Encrypt ACME challenge + location /.well-known/acme-challenge/ { + root /var/www/certbot; + } + + location / { + return 200 'Hayon — SSL setup in progress...'; + add_header Content-Type text/plain; + } + } +} +EOF + +# Kill any existing container on port 80 +docker stop temp_nginx 2>/dev/null && docker rm temp_nginx 2>/dev/null || true + +docker run -d \ + --name temp_nginx \ + -p 80:80 \ + -v /tmp/nginx-bootstrap.conf:/etc/nginx/nginx.conf:ro \ + -v $APP_DIR/certbot/www:/var/www/certbot \ + nginx:1.27-alpine + +echo "[Phase 1] Temporary nginx running on port 80" +echo " Verifying: curl http://hayon.site" +sleep 3 +curl -s http://hayon.site || echo "Warning: curl failed — check DNS is pointing here" + +# ---------------------------------------------------------- +# Phase 2: Get SSL certificates +# ---------------------------------------------------------- +echo "" +echo "[Phase 2] Getting cert for hayon.site + www.hayon.site..." + +docker run --rm \ + -v $APP_DIR/certbot/conf:/etc/letsencrypt \ + -v $APP_DIR/certbot/www:/var/www/certbot \ + certbot/certbot certonly \ + --webroot \ + --webroot-path=/var/www/certbot \ + --email $EMAIL \ + --agree-tos \ + --no-eff-email \ + -d hayon.site \ + -d www.hayon.site + +echo "" +echo "[Phase 2] Getting cert for api.hayon.site..." 
# Separate certificate for the API subdomain.
docker run --rm \
    -v "$APP_DIR/certbot/conf":/etc/letsencrypt \
    -v "$APP_DIR/certbot/www":/var/www/certbot \
    certbot/certbot certonly \
    --webroot \
    --webroot-path=/var/www/certbot \
    --email "$EMAIL" \
    --agree-tos \
    --no-eff-email \
    -d api.hayon.site

# ----------------------------------------------------------
# Phase 3: Stop throwaway nginx — the real app starts via CD push
# ----------------------------------------------------------
echo ""
echo "[Phase 3] Stopping temporary nginx..."
docker stop temp_nginx && docker rm temp_nginx

echo ""
echo "=========================================="
echo " SSL certificates obtained!"
echo ""
# Show the lineage directories certbot created (quoted for safety).
ls "$APP_DIR/certbot/conf/live/"
echo ""
echo " Next step: push to main branch to trigger"
echo " the GitHub Actions CD pipeline."
echo " It will pull images and start all services."
echo "=========================================="
diff --git a/scripts/ec2-setup.sh b/scripts/ec2-setup.sh
new file mode 100644
index 0000000..8d06b62
--- /dev/null
+++ b/scripts/ec2-setup.sh
@@ -0,0 +1,100 @@
#!/bin/bash
# =============================================================
# EC2 Bootstrap Script — Run this ONCE after SSH-ing in
# Ubuntu 22.04 LTS
# Usage: bash ec2-setup.sh
# =============================================================

set -e  # exit on any error

echo "=========================================="
echo " Hayon EC2 Bootstrap"
echo "=========================================="

# ----------------------------------------------------------
# 1. System update
# ----------------------------------------------------------
echo "[1/6] Updating system packages..."
sudo apt-get update -y
sudo apt-get upgrade -y

# ----------------------------------------------------------
# 2. Install Docker
# ----------------------------------------------------------
echo "[2/6] Installing Docker..."
# Docker's official apt repository, following docs.docker.com/engine/install/ubuntu.
sudo apt-get install -y ca-certificates curl gnupg lsb-release

sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | \
    sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg

echo \
    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
    https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) stable" | \
    sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt-get update -y
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

# Add the invoking user to the docker group so `docker` works without sudo.
# Quoted ("$USER") so an unset/odd value cannot word-split; membership only
# takes effect on the next login.
sudo usermod -aG docker "$USER"

echo "[2/6] Docker installed: $(docker --version)"
echo "[2/6] Docker Compose installed: $(docker compose version)"

# ----------------------------------------------------------
# 3. Create app directory
# ----------------------------------------------------------
echo "[3/6] Creating app directory..."
mkdir -p /home/ubuntu/hayon
mkdir -p /home/ubuntu/hayon/backend
mkdir -p /home/ubuntu/hayon/certbot/conf
mkdir -p /home/ubuntu/hayon/certbot/www

echo "[3/6] App directory ready at /home/ubuntu/hayon"

# ----------------------------------------------------------
# 4. Memory: swap file (important for t2.micro — only 1GB RAM)
# ----------------------------------------------------------
echo "[4/6] Creating 2GB swap file (critical for t2.micro)..."
# Guarded so a re-run never recreates or double-registers the swap file.
if [ ! -f /swapfile ]; then
    sudo fallocate -l 2G /swapfile
    sudo chmod 600 /swapfile
    sudo mkswap /swapfile
    sudo swapon /swapfile
    echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
    echo "[4/6] Swap created and enabled"
else
    echo "[4/6] Swap already exists, skipping"
fi

# ----------------------------------------------------------
# 5. Firewall (UFW)
# ----------------------------------------------------------
echo "[5/6] Configuring firewall..."
# Open only what the stack needs: SSH plus HTTP/HTTPS.
for port in 22 80 443; do
    sudo ufw allow "${port}/tcp"
done
sudo ufw --force enable  # --force skips the interactive confirmation
echo "[5/6] Firewall configured"

# ----------------------------------------------------------
# 6. Git (for pulling updates)
# ----------------------------------------------------------
echo "[6/6] Installing git..."
sudo apt-get install -y git

# Final summary — quoted heredoc (no expansion), same output as the
# original run of echo statements.
cat << 'EOF'

==========================================
 Bootstrap complete!
 IMPORTANT: Log out and back in so
 docker group takes effect.
==========================================

Next steps:
 1. Log out: exit
 2. SSH back in
 3. Verify: docker ps (should work without sudo)
 4. Run: bash certbot-init.sh (get SSL certs)
EOF