From 60138d59cefb13020c5b4562a44a4a34f3eda40d Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Fri, 23 Jan 2026 15:08:09 -0600 Subject: [PATCH 01/22] feat: add user metadata support for uploads and RLS policies --- src/http/routes/object/getSignedUploadURL.ts | 10 ++++++ src/http/routes/tus/lifecycle.ts | 17 ++++++++- src/storage/object.ts | 5 ++- src/storage/protocols/s3/s3-handler.ts | 38 +++++++++++--------- src/storage/uploader.ts | 14 +++++--- src/test/cdn.test.ts | 2 +- src/test/rls.test.ts | 28 ++++++++++++--- src/test/rls_tests.yaml | 8 ++++- src/test/scanner.test.ts | 4 +-- 9 files changed, 95 insertions(+), 31 deletions(-) diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts index b84c113bf..36c953ce7 100644 --- a/src/http/routes/object/getSignedUploadURL.ts +++ b/src/http/routes/object/getSignedUploadURL.ts @@ -4,6 +4,7 @@ import { getConfig } from '../../../config' import { createDefaultSchema } from '../../routes-helper' import { AuthenticatedRequest } from '../../types' import { ROUTE_OPERATIONS } from '../operations' +import { parseUserMetadata } from '../../../storage/uploader' const { uploadSignedUrlExpirationTime } = getConfig() @@ -69,10 +70,19 @@ export default async function routes(fastify: FastifyInstance) { const urlPath = `${bucketName}/${objectName}` + let userMetadata: Record | undefined + + const customMd = request.headers['x-metadata'] + + if (typeof customMd === 'string') { + userMetadata = parseUserMetadata(customMd) + } + const signedUpload = await request.storage .from(bucketName) .signUploadObjectUrl(objectName, urlPath as string, uploadSignedUrlExpirationTime, owner, { upsert: request.headers['x-upsert'] === 'true', + userMetadata: userMetadata, }) return response.status(200).send({ url: signedUpload.url, token: signedUpload.token }) diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index 6ab40b714..a52e1c249 100644 --- 
a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -3,7 +3,7 @@ import { ERRORS, isRenderableError } from '@internal/errors' import { UploadId } from '@storage/protocols/tus' import { Storage } from '@storage/storage' import { Uploader, validateMimeType } from '@storage/uploader' -import { Upload } from '@tus/server' +import { Metadata, Upload } from '@tus/server' import { randomUUID } from 'crypto' import http from 'http' import { BaseLogger } from 'pino' @@ -62,6 +62,20 @@ export async function onIncomingRequest(rawReq: Request, id: string) { req.upload.resources = [`${uploadID.bucket}/${uploadID.objectName}`] + // following same metadata parsing logic as @tus/server PostHandler so it + // it matches the value on Upload.metadata when inserted in storage.objects + // link: https://github.com/tus/tus-node-server/blob/1a6482a7a55e1587bda8c6887250f36cf9d606bd/packages/server/src/handlers/PostHandler.ts#L46 + let customMd: Record | undefined = undefined + const uploadMetadataHeader = req.headers['upload-metadata'] + + if (uploadMetadataHeader && typeof uploadMetadataHeader === 'string') { + try { + customMd = Metadata.parse(uploadMetadataHeader) + } catch (e) { + req.log.warn({ error: e }, 'Failed to parse user metadata') + } + } + // Handle signed url requests if (req.url?.startsWith(`/upload/resumable/sign`)) { const signature = req.headers['x-signature'] @@ -96,6 +110,7 @@ export async function onIncomingRequest(rawReq: Request, id: string) { bucketId: uploadID.bucket, objectName: uploadID.objectName, isUpsert, + userMetadata: customMd, }) } diff --git a/src/storage/object.ts b/src/storage/object.ts index 9a8781c50..4f9ad347c 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -97,6 +97,7 @@ export class ObjectStorage { owner: file.owner, isUpsert: Boolean(file.isUpsert), signal: file.signal, + userMetadata: uploadRequest.userMetadata, }) } @@ -337,6 +338,7 @@ export class ObjectStorage { objectName: destinationKey, owner, 
isUpsert: upsert, + userMetadata: userMetadata, }) try { @@ -790,7 +792,7 @@ export class ObjectStorage { url: string, expiresIn: number, owner?: string, - options?: { upsert?: boolean } + options?: { upsert?: boolean; userMetadata?: Record } ) { // check if user has INSERT permissions await this.uploader.canUpload({ @@ -798,6 +800,7 @@ export class ObjectStorage { objectName, owner, isUpsert: options?.upsert ?? false, + userMetadata: options?.userMetadata, }) const { urlSigningKey } = await getJwtSecret(this.db.tenantId) diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts index 10f22424d..fd6f1971d 100644 --- a/src/storage/protocols/s3/s3-handler.ts +++ b/src/storage/protocols/s3/s3-handler.ts @@ -413,6 +413,7 @@ export class S3ProtocolHandler { objectName: command.Key as string, isUpsert: true, owner: this.owner, + userMetadata: command.Metadata, }) const uploadId = await this.storage.backend.createMultiPartUpload( @@ -470,17 +471,18 @@ export class S3ProtocolHandler { throw ERRORS.InvalidUploadId() } + const multiPartUpload = await this.storage.db + .asSuperUser() + .findMultipartUpload(UploadId, 'id,version,user_metadata') + await uploader.canUpload({ bucketId: Bucket as string, objectName: Key as string, isUpsert: true, owner: this.owner, + userMetadata: multiPartUpload.user_metadata || undefined, }) - const multiPartUpload = await this.storage.db - .asSuperUser() - .findMultipartUpload(UploadId, 'id,version,user_metadata') - const parts = command.MultipartUpload?.Parts || [] if (parts.length === 0) { @@ -578,15 +580,17 @@ export class S3ProtocolHandler { const maxFileSize = await getFileSizeLimit(this.storage.db.tenantId, bucket?.file_size_limit) const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) + + const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize) + await uploader.canUpload({ bucketId: Bucket as string, objectName: Key as string, owner: 
this.owner, isUpsert: true, + userMetadata: multipart.user_metadata || undefined, }) - const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize) - if (signal?.aborted) { throw ERRORS.AbortedTerminate('UploadPart aborted') } @@ -695,9 +699,9 @@ export class S3ProtocolHandler { cacheControl: command.CacheControl!, mimeType: command.ContentType!, isTruncated: options.isTruncated, - userMetadata: command.Metadata, }, objectName: command.Key as string, + userMetadata: command.Metadata, owner: this.owner, isUpsert: true, uploadType: 's3', @@ -735,7 +739,7 @@ export class S3ProtocolHandler { const multipart = await this.storage.db .asSuperUser() - .findMultipartUpload(UploadId, 'id,version') + .findMultipartUpload(UploadId, 'id,version,user_metadata') const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) await uploader.canUpload({ @@ -743,6 +747,7 @@ export class S3ProtocolHandler { objectName: Key, owner: this.owner, isUpsert: true, + userMetadata: multipart.user_metadata || undefined, }) await this.storage.backend.abortMultipartUpload( @@ -1233,13 +1238,6 @@ export class S3ProtocolHandler { const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) - await uploader.canUpload({ - bucketId: Bucket, - objectName: Key, - owner: this.owner, - isUpsert: true, - }) - const [destinationBucket] = await this.storage.db.asSuperUser().withTransaction(async (db) => { return Promise.all([ db.findBucketById(Bucket, 'file_size_limit'), @@ -1253,6 +1251,14 @@ export class S3ProtocolHandler { const multipart = await this.shouldAllowPartUpload(UploadId, Number(copySize), maxFileSize) + await uploader.canUpload({ + bucketId: Bucket, + objectName: Key, + owner: this.owner, + isUpsert: true, + userMetadata: multipart.user_metadata || undefined, + }) + const uploadPart = await this.storage.backend.uploadPartCopy( storageS3Bucket, this.storage.location.getKeyLocation({ @@ -1324,7 +1330,7 @@ 
export class S3ProtocolHandler { return this.storage.db.asSuperUser().withTransaction(async (db) => { const multipart = await db.findMultipartUpload( uploadId, - 'in_progress_size,version,upload_signature', + 'in_progress_size,version,upload_signature,user_metadata', { forUpdate: true, } diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts index 8b185487e..59fe1a228 100644 --- a/src/storage/uploader.ts +++ b/src/storage/uploader.ts @@ -20,13 +20,13 @@ interface FileUpload { cacheControl: string isTruncated: () => boolean xRobotsTag?: string - userMetadata?: Record } export interface UploadRequest { bucketId: string objectName: string file: FileUpload + userMetadata: Record | undefined owner?: string isUpsert?: boolean uploadType?: 'standard' | 's3' | 'resumable' @@ -46,7 +46,9 @@ export class Uploader { private readonly location: StorageObjectLocator ) {} - async canUpload(options: Pick) { + async canUpload( + options: Pick + ) { const shouldCreateObject = !options.isUpsert if (shouldCreateObject) { @@ -56,6 +58,7 @@ export class Uploader { name: options.objectName, version: '1', owner: options.owner, + user_metadata: options.userMetadata, }) }) } else { @@ -65,6 +68,7 @@ export class Uploader { name: options.objectName, version: '1', owner: options.owner, + user_metadata: options.userMetadata, }) }) } @@ -125,7 +129,7 @@ export class Uploader { ...request, version, objectMetadata, - userMetadata: { ...file.userMetadata }, + userMetadata: { ...request.userMetadata }, }) } catch (e) { await ObjectAdminDelete.send({ @@ -323,7 +327,9 @@ export async function fileUploadFromRequest( allowedMimeTypes?: string[] objectName: string } -): Promise { +): Promise< + FileUpload & { maxFileSize: number; userMetadata: Record | undefined } +> { const contentType = request.headers['content-type'] const xRobotsTag = request.headers['x-robots-tag'] as string | undefined diff --git a/src/test/cdn.test.ts b/src/test/cdn.test.ts index cee987215..c25b64b72 100644 --- 
a/src/test/cdn.test.ts +++ b/src/test/cdn.test.ts @@ -94,12 +94,12 @@ describe('CDN Cache Manager', () => { await storageHook.storage.from(bucketName).uploadNewObject({ isUpsert: true, objectName, + userMetadata: {}, file: { body: Readable.from(Buffer.from('test')), cacheControl: 'public, max-age=31536000', mimeType: 'text/plain', isTruncated: () => false, - userMetadata: {}, }, }) diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts index 4f572d58e..61e4b4071 100644 --- a/src/test/rls.test.ts +++ b/src/test/rls.test.ts @@ -55,6 +55,7 @@ interface TestCaseAssert { useExistingBucketName?: string role?: string policies?: string[] + userMetadata?: Record status: number error?: string } @@ -233,6 +234,7 @@ describe('RLS policies', () => { bucket: bucketName, objectName, jwt: assert.role === 'service' ? await serviceKeyAsync : jwt, + userMetadata: assert.userMetadata, }) console.log( @@ -294,15 +296,20 @@ describe('RLS policies', () => { async function runOperation( operation: TestCaseAssert['operation'], - options: { bucket: string; jwt: string; objectName: string } + options: { + bucket: string + jwt: string + objectName: string + userMetadata?: Record + } ) { - const { jwt, bucket, objectName } = options + const { jwt, bucket, objectName, userMetadata } = options switch (operation) { case 'upload': - return uploadFile(bucket, objectName, jwt) + return uploadFile(bucket, objectName, jwt, false, userMetadata) case 'upload.upsert': - return uploadFile(bucket, objectName, jwt, true) + return uploadFile(bucket, objectName, jwt, true, userMetadata) case 'bucket.list': return appInstance.inject({ method: 'GET', @@ -454,10 +461,21 @@ async function createPolicy(db: Knex, policy: Policy) { return Promise.all(created) } -async function uploadFile(bucket: string, fileName: string, jwt: string, upsert?: boolean) { +async function uploadFile( + bucket: string, + fileName: string, + jwt: string, + upsert?: boolean, + userMetadata?: Record +) { const testFile = 
fs.createReadStream(path.resolve(__dirname, 'assets', 'sadcat.jpg')) const form = new FormData() form.append('file', testFile) + + if (userMetadata) { + form.append('metadata', JSON.stringify(userMetadata)) + } + const headers = Object.assign({}, form.getHeaders(), { authorization: `Bearer ${jwt}`, ...(upsert ? { 'x-upsert': 'true' } : {}), diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index 2581bc9d0..b50efff55 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -47,6 +47,12 @@ policies: permissions: ["delete"] content: "USING(owner = '{{uid}}')" + - name: insert_with_metadata_check + tables: ['storage.objects'] + roles: ['authenticated'] + permissions: ['insert'] + content: "WITH CHECK(user_metadata->>'department' = 'engineering')" + tests: - description: "Will only able to read objects" policies: @@ -474,4 +480,4 @@ tests: - operation: bucket.delete status: 400 - error: "Bucket not found" + error: 'Bucket not found' diff --git a/src/test/scanner.test.ts b/src/test/scanner.test.ts index dd3ebf266..d4d81f94d 100644 --- a/src/test/scanner.test.ts +++ b/src/test/scanner.test.ts @@ -24,11 +24,11 @@ describe('ObjectScanner', () => { bucketId: bucket.id, objectName: randomUUID() + `-test-${i}.text`, uploadType: 'standard', + userMetadata: {}, file: { body: Readable.from(Buffer.from('test')), mimeType: 'text/plain', cacheControl: 'no-cache', - userMetadata: {}, isTruncated: () => false, }, }) @@ -91,11 +91,11 @@ describe('ObjectScanner', () => { bucketId: bucket.id, objectName: randomUUID() + `-test-${i}.text`, uploadType: 'standard', + userMetadata: {}, file: { body: Readable.from(Buffer.from('test')), mimeType: 'text/plain', cacheControl: 'no-cache', - userMetadata: {}, isTruncated: () => false, }, }) From c38c6e4843fda6213c28149df57d79b890f5353a Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Mon, 26 Jan 2026 13:13:28 -0600 Subject: [PATCH 02/22] fix: update metadata parsing logic to handle user metadata correctly --- 
src/http/routes/tus/lifecycle.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index a52e1c249..0e3fa3963 100644 --- a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -62,15 +62,15 @@ export async function onIncomingRequest(rawReq: Request, id: string) { req.upload.resources = [`${uploadID.bucket}/${uploadID.objectName}`] - // following same metadata parsing logic as @tus/server PostHandler so it - // it matches the value on Upload.metadata when inserted in storage.objects - // link: https://github.com/tus/tus-node-server/blob/1a6482a7a55e1587bda8c6887250f36cf9d606bd/packages/server/src/handlers/PostHandler.ts#L46 - let customMd: Record | undefined = undefined + let customMd: Record | undefined = undefined const uploadMetadataHeader = req.headers['upload-metadata'] if (uploadMetadataHeader && typeof uploadMetadataHeader === 'string') { try { - customMd = Metadata.parse(uploadMetadataHeader) + const parsedMetadata = Metadata.parse(uploadMetadataHeader) + if (parsedMetadata?.metadata) { + customMd = JSON.parse(parsedMetadata.metadata) + } } catch (e) { req.log.warn({ error: e }, 'Failed to parse user metadata') } From d65295b659227802f552b37000188481fab73451 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Mon, 2 Feb 2026 19:22:51 -0600 Subject: [PATCH 03/22] feat: add file metadata support for uploads and RLS policies --- .../0056-s3-multipart-uploads-metadata.sql | 1 + src/http/routes/object/getSignedUploadURL.ts | 10 ++ src/http/routes/tus/lifecycle.ts | 8 ++ src/internal/database/migrations/types.ts | 121 +++++++++--------- src/storage/database/adapter.ts | 3 +- src/storage/database/knex.ts | 6 +- src/storage/object.ts | 10 +- src/storage/protocols/s3/s3-handler.ts | 16 ++- src/storage/schemas/multipart.ts | 3 + src/storage/uploader.ts | 43 ++++++- src/test/rls.test.ts | 21 ++- src/test/rls_tests.yaml | 55 ++++++++ 12 files changed, 219 
insertions(+), 78 deletions(-) create mode 100644 migrations/tenant/0056-s3-multipart-uploads-metadata.sql diff --git a/migrations/tenant/0056-s3-multipart-uploads-metadata.sql b/migrations/tenant/0056-s3-multipart-uploads-metadata.sql new file mode 100644 index 000000000..ef9496bb8 --- /dev/null +++ b/migrations/tenant/0056-s3-multipart-uploads-metadata.sql @@ -0,0 +1 @@ +ALTER TABLE storage.s3_multipart_uploads ADD COLUMN IF NOT EXISTS metadata jsonb NULL; \ No newline at end of file diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts index 36c953ce7..ccd55d065 100644 --- a/src/http/routes/object/getSignedUploadURL.ts +++ b/src/http/routes/object/getSignedUploadURL.ts @@ -21,6 +21,8 @@ const getSignedUploadURLHeadersSchema = { type: 'object', properties: { 'x-upsert': { type: 'string' }, + 'content-type': { type: 'string' }, + 'content-length': { type: 'string' }, authorization: { type: 'string' }, }, required: ['authorization'], @@ -78,11 +80,19 @@ export default async function routes(fastify: FastifyInstance) { userMetadata = parseUserMetadata(customMd) } + const contentType = request.headers['content-type'] + const contentLengthHeader = request.headers['content-length'] + const contentLength = contentLengthHeader ? 
Number(contentLengthHeader) : undefined + const signedUpload = await request.storage .from(bucketName) .signUploadObjectUrl(objectName, urlPath as string, uploadSignedUrlExpirationTime, owner, { upsert: request.headers['x-upsert'] === 'true', userMetadata: userMetadata, + metadata: { + mimetype: contentType, + contentLength: contentLength, + }, }) return response.status(200).send({ url: signedUpload.url, token: signedUpload.token }) diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index 0e3fa3963..4b564c1af 100644 --- a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -105,12 +105,20 @@ export async function onIncomingRequest(rawReq: Request, id: string) { req.upload.storage.location ) + const uploadLength = req.headers['upload-length'] + const contentLength = uploadLength ? Number(uploadLength) : undefined + const contentType = req.headers['content-type'] + await uploader.canUpload({ owner: req.upload.owner, bucketId: uploadID.bucket, objectName: uploadID.objectName, isUpsert, userMetadata: customMd, + metadata: { + mimetype: contentType, + contentLength: contentLength, + }, }) } diff --git a/src/internal/database/migrations/types.ts b/src/internal/database/migrations/types.ts index 35bd839c0..bebd77633 100644 --- a/src/internal/database/migrations/types.ts +++ b/src/internal/database/migrations/types.ts @@ -1,59 +1,62 @@ -export const DBMigration = { - 'create-migrations-table': 0, - initialmigration: 1, - 'storage-schema': 2, - 'pathtoken-column': 3, - 'add-migrations-rls': 4, - 'add-size-functions': 5, - 'change-column-name-in-get-size': 6, - 'add-rls-to-buckets': 7, - 'add-public-to-buckets': 8, - 'fix-search-function': 9, - 'search-files-search-function': 10, - 'add-trigger-to-auto-update-updated_at-column': 11, - 'add-automatic-avif-detection-flag': 12, - 'add-bucket-custom-limits': 13, - 'use-bytes-for-max-size': 14, - 'add-can-insert-object-function': 15, - 'add-version': 16, - 
'drop-owner-foreign-key': 17, - add_owner_id_column_deprecate_owner: 18, - 'alter-default-value-objects-id': 19, - 'list-objects-with-delimiter': 20, - 's3-multipart-uploads': 21, - 's3-multipart-uploads-big-ints': 22, - 'optimize-search-function': 23, - 'operation-function': 24, - 'custom-metadata': 25, - 'objects-prefixes': 26, - 'search-v2': 27, - 'object-bucket-name-sorting': 28, - 'create-prefixes': 29, - 'update-object-levels': 30, - 'objects-level-index': 31, - 'backward-compatible-index-on-objects': 32, - 'backward-compatible-index-on-prefixes': 33, - 'optimize-search-function-v1': 34, - 'add-insert-trigger-prefixes': 35, - 'optimise-existing-functions': 36, - 'add-bucket-name-length-trigger': 37, - 'iceberg-catalog-flag-on-buckets': 38, - 'add-search-v2-sort-support': 39, - 'fix-prefix-race-conditions-optimized': 40, - 'add-object-level-update-trigger': 41, - 'rollback-prefix-triggers': 42, - 'fix-object-level': 43, - 'vector-bucket-type': 44, - 'vector-buckets': 45, - 'buckets-objects-grants': 46, - 'iceberg-table-metadata': 47, - 'iceberg-catalog-ids': 48, - 'buckets-objects-grants-postgres': 49, - 'search-v2-optimised': 50, - 'index-backward-compatible-search': 51, - 'drop-not-used-indexes-and-functions': 52, - 'drop-index-lower-name': 53, - 'drop-index-object-level': 54, - 'prevent-direct-deletes': 55, - 'fix-optimized-search-function': 56, -} + + export const DBMigration = { + 'create-migrations-table': 0, + 'initialmigration': 1, + 'storage-schema': 2, + 'pathtoken-column': 3, + 'add-migrations-rls': 4, + 'add-size-functions': 5, + 'change-column-name-in-get-size': 6, + 'add-rls-to-buckets': 7, + 'add-public-to-buckets': 8, + 'fix-search-function': 9, + 'search-files-search-function': 10, + 'add-trigger-to-auto-update-updated_at-column': 11, + 'add-automatic-avif-detection-flag': 12, + 'add-bucket-custom-limits': 13, + 'use-bytes-for-max-size': 14, + 'add-can-insert-object-function': 15, + 'add-version': 16, + 'drop-owner-foreign-key': 17, + 
'add_owner_id_column_deprecate_owner': 18, + 'alter-default-value-objects-id': 19, + 'list-objects-with-delimiter': 20, + 's3-multipart-uploads': 21, + 's3-multipart-uploads-big-ints': 22, + 'optimize-search-function': 23, + 'operation-function': 24, + 'custom-metadata': 25, + 'objects-prefixes': 26, + 'search-v2': 27, + 'object-bucket-name-sorting': 28, + 'create-prefixes': 29, + 'update-object-levels': 30, + 'objects-level-index': 31, + 'backward-compatible-index-on-objects': 32, + 'backward-compatible-index-on-prefixes': 33, + 'optimize-search-function-v1': 34, + 'add-insert-trigger-prefixes': 35, + 'optimise-existing-functions': 36, + 'add-bucket-name-length-trigger': 37, + 'iceberg-catalog-flag-on-buckets': 38, + 'add-search-v2-sort-support': 39, + 'fix-prefix-race-conditions-optimized': 40, + 'add-object-level-update-trigger': 41, + 'rollback-prefix-triggers': 42, + 'fix-object-level': 43, + 'vector-bucket-type': 44, + 'vector-buckets': 45, + 'buckets-objects-grants': 46, + 'iceberg-table-metadata': 47, + 'iceberg-catalog-ids': 48, + 'buckets-objects-grants-postgres': 49, + 'search-v2-optimised': 50, + 'index-backward-compatible-search': 51, + 'drop-not-used-indexes-and-functions': 52, + 'drop-index-lower-name': 53, + 'drop-index-object-level': 54, + 'prevent-direct-deletes': 55, + 'fix-optimized-search-function': 56, + 's3-multipart-uploads-metadata': 57, + } + \ No newline at end of file diff --git a/src/storage/database/adapter.ts b/src/storage/database/adapter.ts index db190f483..d90b0beaf 100644 --- a/src/storage/database/adapter.ts +++ b/src/storage/database/adapter.ts @@ -195,7 +195,8 @@ export interface Database { version: string, signature: string, owner?: string, - metadata?: Record + userMetadata?: Record, + metadata?: Partial ): Promise findMultipartUpload( diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts index 546657109..2a57b3af8 100644 --- a/src/storage/database/knex.ts +++ b/src/storage/database/knex.ts @@ -912,7 +912,8 
@@ export class StorageKnexDB implements Database { version: string, signature: string, owner?: string, - metadata?: Record + userMetadata?: Record, + metadata?: Partial ) { return this.runQuery('CreateMultipartUpload', async (knex, signal) => { const multipart = await knex @@ -925,7 +926,8 @@ export class StorageKnexDB implements Database { version, upload_signature: signature, owner_id: owner, - user_metadata: metadata, + user_metadata: userMetadata, + metadata: metadata, }) ) .returning('*') diff --git a/src/storage/object.ts b/src/storage/object.ts index 4f9ad347c..0718bc79a 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -17,7 +17,7 @@ import { ObjectUpdatedMetadata, } from './events' import { mustBeValidKey } from './limits' -import { fileUploadFromRequest, Uploader, UploadRequest } from './uploader' +import { fileUploadFromRequest, Uploader, UploadRequest, CanUploadMetadata} from './uploader' const { requestUrlLengthLimit } = getConfig() @@ -339,6 +339,7 @@ export class ObjectStorage { owner, isUpsert: upsert, userMetadata: userMetadata, + metadata: destinationMetadata, }) try { @@ -792,7 +793,11 @@ export class ObjectStorage { url: string, expiresIn: number, owner?: string, - options?: { upsert?: boolean; userMetadata?: Record } + options?: { + upsert?: boolean + userMetadata?: Record + metadata?: CanUploadMetadata + } ) { // check if user has INSERT permissions await this.uploader.canUpload({ @@ -801,6 +806,7 @@ export class ObjectStorage { owner, isUpsert: options?.upsert ?? 
false, userMetadata: options?.userMetadata, + metadata: options?.metadata, }) const { urlSigningKey } = await getJwtSecret(this.db.tenantId) diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts index fd6f1971d..c347a794d 100644 --- a/src/storage/protocols/s3/s3-handler.ts +++ b/src/storage/protocols/s3/s3-handler.ts @@ -414,6 +414,9 @@ export class S3ProtocolHandler { isUpsert: true, owner: this.owner, userMetadata: command.Metadata, + metadata: { + mimetype: command.ContentType, + }, }) const uploadId = await this.storage.backend.createMultiPartUpload( @@ -442,7 +445,8 @@ export class S3ProtocolHandler { version, signature, this.owner, - command.Metadata + command.Metadata, + { mimetype: command.ContentType } ) return { @@ -473,7 +477,7 @@ export class S3ProtocolHandler { const multiPartUpload = await this.storage.db .asSuperUser() - .findMultipartUpload(UploadId, 'id,version,user_metadata') + .findMultipartUpload(UploadId, 'id,version,user_metadata,metadata') await uploader.canUpload({ bucketId: Bucket as string, @@ -481,6 +485,7 @@ export class S3ProtocolHandler { isUpsert: true, owner: this.owner, userMetadata: multiPartUpload.user_metadata || undefined, + metadata: multiPartUpload.metadata || undefined, }) const parts = command.MultipartUpload?.Parts || [] @@ -589,6 +594,7 @@ export class S3ProtocolHandler { owner: this.owner, isUpsert: true, userMetadata: multipart.user_metadata || undefined, + metadata: multipart.metadata || undefined, }) if (signal?.aborted) { @@ -739,7 +745,7 @@ export class S3ProtocolHandler { const multipart = await this.storage.db .asSuperUser() - .findMultipartUpload(UploadId, 'id,version,user_metadata') + .findMultipartUpload(UploadId, 'id,version,user_metadata,metadata') const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) await uploader.canUpload({ @@ -748,6 +754,7 @@ export class S3ProtocolHandler { owner: this.owner, isUpsert: true, userMetadata: 
multipart.user_metadata || undefined, + metadata: multipart.metadata || undefined, }) await this.storage.backend.abortMultipartUpload( @@ -1257,6 +1264,7 @@ export class S3ProtocolHandler { owner: this.owner, isUpsert: true, userMetadata: multipart.user_metadata || undefined, + metadata: multipart.metadata || undefined, }) const uploadPart = await this.storage.backend.uploadPartCopy( @@ -1330,7 +1338,7 @@ export class S3ProtocolHandler { return this.storage.db.asSuperUser().withTransaction(async (db) => { const multipart = await db.findMultipartUpload( uploadId, - 'in_progress_size,version,upload_signature,user_metadata', + 'in_progress_size,version,upload_signature,user_metadata,metadata', { forUpdate: true, } diff --git a/src/storage/schemas/multipart.ts b/src/storage/schemas/multipart.ts index 1ca6eb296..3a1849ac8 100644 --- a/src/storage/schemas/multipart.ts +++ b/src/storage/schemas/multipart.ts @@ -15,6 +15,9 @@ export const multipartUploadSchema = { user_metadata: { anyOf: [{ type: 'object', additionalProperties: true }, { type: 'null' }], }, + metadata: { + anyOf: [{ type: 'object', additionalProperties: true }, { type: 'null' }], + }, }, required: [ 'id', diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts index 59fe1a228..3a36d63cb 100644 --- a/src/storage/uploader.ts +++ b/src/storage/uploader.ts @@ -20,6 +20,7 @@ interface FileUpload { cacheControl: string isTruncated: () => boolean xRobotsTag?: string + contentLength?: number } export interface UploadRequest { @@ -33,6 +34,18 @@ export interface UploadRequest { signal?: AbortSignal } +export type CanUploadMetadata = Partial> & + Record + +export interface CanUploadOptions { + bucketId: string + objectName: string + owner: string | undefined + isUpsert: boolean | undefined + userMetadata: Record | undefined + metadata: CanUploadMetadata | undefined +} + const MAX_CUSTOM_METADATA_SIZE = 1024 * 1024 /** @@ -46,9 +59,7 @@ export class Uploader { private readonly location: StorageObjectLocator ) 
{} - async canUpload( - options: Pick - ) { + async canUpload(options: CanUploadOptions) { const shouldCreateObject = !options.isUpsert if (shouldCreateObject) { @@ -58,6 +69,7 @@ export class Uploader { name: options.objectName, version: '1', owner: options.owner, + metadata: options.metadata, user_metadata: options.userMetadata, }) }) @@ -68,6 +80,7 @@ export class Uploader { name: options.objectName, version: '1', owner: options.owner, + metadata: options.metadata, user_metadata: options.userMetadata, }) }) @@ -79,7 +92,7 @@ export class Uploader { * We check RLS policies before proceeding * @param options */ - async prepareUpload(options: Omit) { + async prepareUpload(options: CanUploadOptions & { uploadType?: string }) { await this.canUpload(options) fileUploadStarted.add(1, { uploadType: options.uploadType, @@ -96,7 +109,15 @@ export class Uploader { * @param options */ async upload(request: UploadRequest) { - const version = await this.prepareUpload(request) + const version = await this.prepareUpload({ + bucketId: request.bucketId, + objectName: request.objectName, + owner: request.owner, + isUpsert: request.isUpsert, + userMetadata: request.userMetadata, + metadata: { mimetype: request.file.mimeType, contentLength: request.file.contentLength }, + uploadType: request.uploadType, + }) try { const file = request.file @@ -328,7 +349,12 @@ export async function fileUploadFromRequest( objectName: string } ): Promise< - FileUpload & { maxFileSize: number; userMetadata: Record | undefined } + FileUpload & { + mimeType: string + maxFileSize: number + userMetadata: Record | undefined + contentLength: number | undefined + } > { const contentType = request.headers['content-type'] const xRobotsTag = request.headers['x-robots-tag'] as string | undefined @@ -427,6 +453,10 @@ export async function fileUploadFromRequest( throw ERRORS.NoContentProvided(new Error('Request stream closed before upload could begin')) } + const contentLength = request.headers['content-length'] + 
? Number(request.headers['content-length']) + : undefined + return { body, mimeType, @@ -435,6 +465,7 @@ export async function fileUploadFromRequest( userMetadata, maxFileSize, xRobotsTag, + contentLength, } } diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts index 61e4b4071..e8e0a98d2 100644 --- a/src/test/rls.test.ts +++ b/src/test/rls.test.ts @@ -56,6 +56,8 @@ interface TestCaseAssert { role?: string policies?: string[] userMetadata?: Record + mimeType?: string + contentLength?: number status: number error?: string } @@ -235,6 +237,8 @@ describe('RLS policies', () => { objectName, jwt: assert.role === 'service' ? await serviceKeyAsync : jwt, userMetadata: assert.userMetadata, + mimeType: assert.mimeType, + contentLength: assert.contentLength, }) console.log( @@ -301,15 +305,17 @@ async function runOperation( jwt: string objectName: string userMetadata?: Record + mimeType?: string + contentLength?: number } ) { - const { jwt, bucket, objectName, userMetadata } = options + const { jwt, bucket, objectName, userMetadata, mimeType, contentLength } = options switch (operation) { case 'upload': - return uploadFile(bucket, objectName, jwt, false, userMetadata) + return uploadFile(bucket, objectName, jwt, false, userMetadata, mimeType, contentLength) case 'upload.upsert': - return uploadFile(bucket, objectName, jwt, true, userMetadata) + return uploadFile(bucket, objectName, jwt, true, userMetadata, mimeType, contentLength) case 'bucket.list': return appInstance.inject({ method: 'GET', @@ -466,7 +472,9 @@ async function uploadFile( fileName: string, jwt: string, upsert?: boolean, - userMetadata?: Record + userMetadata?: Record, + mimeType?: string, + contentLength?: number ) { const testFile = fs.createReadStream(path.resolve(__dirname, 'assets', 'sadcat.jpg')) const form = new FormData() @@ -476,9 +484,14 @@ async function uploadFile( form.append('metadata', JSON.stringify(userMetadata)) } + if (mimeType) { + form.append('contentType', mimeType) + } + const 
headers = Object.assign({}, form.getHeaders(), { authorization: `Bearer ${jwt}`, ...(upsert ? { 'x-upsert': 'true' } : {}), + ...(contentLength ? { 'content-length': contentLength.toString() } : {}), }) return appInstance.inject({ diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index b50efff55..4f1fe0abe 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -53,6 +53,18 @@ policies: permissions: ['insert'] content: "WITH CHECK(user_metadata->>'department' = 'engineering')" + - name: insert_only_images + tables: ['storage.objects'] + roles: ['authenticated'] + permissions: ['insert'] + content: "WITH CHECK(metadata->>'mimetype' LIKE 'image/%')" + + - name: insert_max_size_limit + tables: ['storage.objects'] + roles: ['authenticated'] + permissions: ['insert'] + content: "WITH CHECK((metadata->>'contentLength')::int <= 100000)" + tests: - description: "Will only able to read objects" policies: @@ -481,3 +493,46 @@ tests: - operation: bucket.delete status: 400 error: 'Bucket not found' + + - description: 'Will only upload files with correct user metadata' + policies: + - insert_with_metadata_check + asserts: + - operation: upload + objectName: 'test_file.jpg' + userMetadata: + department: 'engineering' + status: 200 + + - operation: upload + status: 400 + error: 'new row violates row-level security policy' + + - description: 'Will only upload image files based on mimetype' + policies: + - insert_only_images + asserts: + - operation: upload + objectName: 'test_image.jpg' + mimeType: 'image/jpeg' + status: 200 + + - operation: upload + objectName: 'test_file.txt' + mimeType: 'text/plain' + status: 400 + error: 'new row violates row-level security policy' + + - description: 'Will only upload files under size limit based on contentLength' + policies: + - insert_max_size_limit + asserts: + - operation: upload + objectName: 'small_file.jpg' + status: 200 + + - operation: upload + objectName: 'large_file.jpg' + contentLength: 200000 + status: 
400 + error: 'new row violates row-level security policy' From a6e40f35de3dd2a738d7f625267e72b998d62243 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Mon, 9 Feb 2026 09:43:43 -0600 Subject: [PATCH 04/22] refactor: simplify userMetadata and contentLength assignment in upload URL signing --- src/http/routes/object/getSignedUploadURL.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts index ccd55d065..bc0d5ada2 100644 --- a/src/http/routes/object/getSignedUploadURL.ts +++ b/src/http/routes/object/getSignedUploadURL.ts @@ -88,10 +88,10 @@ export default async function routes(fastify: FastifyInstance) { .from(bucketName) .signUploadObjectUrl(objectName, urlPath as string, uploadSignedUrlExpirationTime, owner, { upsert: request.headers['x-upsert'] === 'true', - userMetadata: userMetadata, + userMetadata, metadata: { mimetype: contentType, - contentLength: contentLength, + contentLength, }, }) From e0fc664b65cbd659a02c867b1787a4ba497e634c Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Mon, 9 Feb 2026 10:55:20 -0600 Subject: [PATCH 05/22] fix: migration file number --- ...ploads-metadata.sql => 0057-s3-multipart-uploads-metadata.sql} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename migrations/tenant/{0056-s3-multipart-uploads-metadata.sql => 0057-s3-multipart-uploads-metadata.sql} (100%) diff --git a/migrations/tenant/0056-s3-multipart-uploads-metadata.sql b/migrations/tenant/0057-s3-multipart-uploads-metadata.sql similarity index 100% rename from migrations/tenant/0056-s3-multipart-uploads-metadata.sql rename to migrations/tenant/0057-s3-multipart-uploads-metadata.sql From 511946e348dd2ab904348922b2d2b30e1caa81ff Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Mon, 9 Feb 2026 11:41:03 -0600 Subject: [PATCH 06/22] feat: add contentLength to S3 object creation parameters --- src/storage/protocols/s3/s3-handler.ts | 1 + 1 file changed, 1 
insertion(+) diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts index c347a794d..d6de43f75 100644 --- a/src/storage/protocols/s3/s3-handler.ts +++ b/src/storage/protocols/s3/s3-handler.ts @@ -705,6 +705,7 @@ export class S3ProtocolHandler { cacheControl: command.CacheControl!, mimeType: command.ContentType!, isTruncated: options.isTruncated, + contentLength: command.ContentLength, }, objectName: command.Key as string, userMetadata: command.Metadata, From 20c71df5ef2d404acc6937ca950cc374a151b1cb Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Mon, 9 Feb 2026 12:01:36 -0600 Subject: [PATCH 07/22] fix: add migration guard for metadata column on s3_multipart_uploads --- src/storage/database/knex.ts | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts index 2a57b3af8..3b9ef206b 100644 --- a/src/storage/database/knex.ts +++ b/src/storage/database/knex.ts @@ -916,20 +916,26 @@ export class StorageKnexDB implements Database { metadata?: Partial ) { return this.runQuery('CreateMultipartUpload', async (knex, signal) => { + const data: Record = { + id: uploadId, + bucket_id: bucketId, + key: objectName, + version, + upload_signature: signature, + owner_id: owner, + user_metadata: userMetadata, + } + + if ( + !this.latestMigration || + DBMigration[this.latestMigration] >= DBMigration['s3-multipart-uploads-metadata'] + ) { + data.metadata = metadata + } + const multipart = await knex .table('s3_multipart_uploads') - .insert( - this.normalizeColumns({ - id: uploadId, - bucket_id: bucketId, - key: objectName, - version, - upload_signature: signature, - owner_id: owner, - user_metadata: userMetadata, - metadata: metadata, - }) - ) + .insert(this.normalizeColumns(data)) .returning('*') .abortOnSignal(signal) From a77d55d1b5df9eddbde2c3fd0fd0b6416aa1311f Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Tue, 10 Feb 2026 14:25:01 
-0600 Subject: [PATCH 08/22] fix: handle tus uploads differently from first upload vs rest --- src/http/routes/tus/index.ts | 2 +- src/http/routes/tus/lifecycle.ts | 57 ++++++---- src/internal/database/migrations/types.ts | 122 +++++++++++----------- src/test/rls.test.ts | 73 ++++++++++++- src/test/rls_tests.yaml | 31 ++++++ 5 files changed, 201 insertions(+), 84 deletions(-) diff --git a/src/http/routes/tus/index.ts b/src/http/routes/tus/index.ts index b75f88cab..efe5a3de6 100644 --- a/src/http/routes/tus/index.ts +++ b/src/http/routes/tus/index.ts @@ -137,7 +137,7 @@ function createTusServer( namingFunction, onUploadCreate: onCreate, onUploadFinish, - onIncomingRequest, + onIncomingRequest: (req, id) => onIncomingRequest(req, id, datastore), generateUrl, getFileIdFromRequest, onResponseError, diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index 4b564c1af..22b34e643 100644 --- a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -3,7 +3,7 @@ import { ERRORS, isRenderableError } from '@internal/errors' import { UploadId } from '@storage/protocols/tus' import { Storage } from '@storage/storage' import { Uploader, validateMimeType } from '@storage/uploader' -import { Metadata, Upload } from '@tus/server' +import { DataStore, Metadata, Upload } from '@tus/server' import { randomUUID } from 'crypto' import http from 'http' import { BaseLogger } from 'pino' @@ -44,7 +44,7 @@ export type MultiPartRequest = http.IncomingMessage & { /** * Runs on every TUS incoming request */ -export async function onIncomingRequest(rawReq: Request, id: string) { +export async function onIncomingRequest(rawReq: Request, id: string, datastore: DataStore) { const req = getNodeRequest(rawReq) const res = rawReq.node?.res as http.ServerResponse @@ -62,20 +62,6 @@ export async function onIncomingRequest(rawReq: Request, id: string) { req.upload.resources = [`${uploadID.bucket}/${uploadID.objectName}`] - let customMd: Record | undefined = 
undefined - const uploadMetadataHeader = req.headers['upload-metadata'] - - if (uploadMetadataHeader && typeof uploadMetadataHeader === 'string') { - try { - const parsedMetadata = Metadata.parse(uploadMetadataHeader) - if (parsedMetadata?.metadata) { - customMd = JSON.parse(parsedMetadata.metadata) - } - } catch (e) { - req.log.warn({ error: e }, 'Failed to parse user metadata') - } - } - // Handle signed url requests if (req.url?.startsWith(`/upload/resumable/sign`)) { const signature = req.headers['x-signature'] @@ -105,9 +91,42 @@ export async function onIncomingRequest(rawReq: Request, id: string) { req.upload.storage.location ) - const uploadLength = req.headers['upload-length'] - const contentLength = uploadLength ? Number(uploadLength) : undefined - const contentType = req.headers['content-type'] + let contentType: string | undefined + let contentLength: number | undefined + let rawMetadata: string | null | undefined + + if (req.method === 'POST') { + const uploadMetadataHeader = req.headers['upload-metadata'] + if (uploadMetadataHeader && typeof uploadMetadataHeader === 'string') { + try { + const parsedMetadata = Metadata.parse(uploadMetadataHeader) + contentType = parsedMetadata?.contentType ?? undefined + rawMetadata = parsedMetadata?.metadata + } catch (e) { + req.log.warn({ error: e }, 'Failed to parse upload metadata') + throw ERRORS.InvalidParameter('upload-metadata', { + error: e as Error, + message: 'Invalid Upload-Metadata header', + }) + } + } + const uploadLength = req.headers['upload-length'] + contentLength = uploadLength ? Number(uploadLength) : undefined + } else { + const upload = await datastore.getUpload(id) + contentType = upload.metadata?.contentType ?? undefined + contentLength = upload.size ?? 
undefined + rawMetadata = upload.metadata?.metadata + } + + let customMd: Record | undefined + if (rawMetadata) { + try { + customMd = JSON.parse(rawMetadata) + } catch (e) { + req.log.warn({ error: e }, 'Failed to parse user metadata') + } + } await uploader.canUpload({ owner: req.upload.owner, diff --git a/src/internal/database/migrations/types.ts b/src/internal/database/migrations/types.ts index bebd77633..1abbd291c 100644 --- a/src/internal/database/migrations/types.ts +++ b/src/internal/database/migrations/types.ts @@ -1,62 +1,60 @@ - - export const DBMigration = { - 'create-migrations-table': 0, - 'initialmigration': 1, - 'storage-schema': 2, - 'pathtoken-column': 3, - 'add-migrations-rls': 4, - 'add-size-functions': 5, - 'change-column-name-in-get-size': 6, - 'add-rls-to-buckets': 7, - 'add-public-to-buckets': 8, - 'fix-search-function': 9, - 'search-files-search-function': 10, - 'add-trigger-to-auto-update-updated_at-column': 11, - 'add-automatic-avif-detection-flag': 12, - 'add-bucket-custom-limits': 13, - 'use-bytes-for-max-size': 14, - 'add-can-insert-object-function': 15, - 'add-version': 16, - 'drop-owner-foreign-key': 17, - 'add_owner_id_column_deprecate_owner': 18, - 'alter-default-value-objects-id': 19, - 'list-objects-with-delimiter': 20, - 's3-multipart-uploads': 21, - 's3-multipart-uploads-big-ints': 22, - 'optimize-search-function': 23, - 'operation-function': 24, - 'custom-metadata': 25, - 'objects-prefixes': 26, - 'search-v2': 27, - 'object-bucket-name-sorting': 28, - 'create-prefixes': 29, - 'update-object-levels': 30, - 'objects-level-index': 31, - 'backward-compatible-index-on-objects': 32, - 'backward-compatible-index-on-prefixes': 33, - 'optimize-search-function-v1': 34, - 'add-insert-trigger-prefixes': 35, - 'optimise-existing-functions': 36, - 'add-bucket-name-length-trigger': 37, - 'iceberg-catalog-flag-on-buckets': 38, - 'add-search-v2-sort-support': 39, - 'fix-prefix-race-conditions-optimized': 40, - 
'add-object-level-update-trigger': 41, - 'rollback-prefix-triggers': 42, - 'fix-object-level': 43, - 'vector-bucket-type': 44, - 'vector-buckets': 45, - 'buckets-objects-grants': 46, - 'iceberg-table-metadata': 47, - 'iceberg-catalog-ids': 48, - 'buckets-objects-grants-postgres': 49, - 'search-v2-optimised': 50, - 'index-backward-compatible-search': 51, - 'drop-not-used-indexes-and-functions': 52, - 'drop-index-lower-name': 53, - 'drop-index-object-level': 54, - 'prevent-direct-deletes': 55, - 'fix-optimized-search-function': 56, - 's3-multipart-uploads-metadata': 57, - } - \ No newline at end of file +export const DBMigration = { + 'create-migrations-table': 0, + initialmigration: 1, + 'storage-schema': 2, + 'pathtoken-column': 3, + 'add-migrations-rls': 4, + 'add-size-functions': 5, + 'change-column-name-in-get-size': 6, + 'add-rls-to-buckets': 7, + 'add-public-to-buckets': 8, + 'fix-search-function': 9, + 'search-files-search-function': 10, + 'add-trigger-to-auto-update-updated_at-column': 11, + 'add-automatic-avif-detection-flag': 12, + 'add-bucket-custom-limits': 13, + 'use-bytes-for-max-size': 14, + 'add-can-insert-object-function': 15, + 'add-version': 16, + 'drop-owner-foreign-key': 17, + add_owner_id_column_deprecate_owner: 18, + 'alter-default-value-objects-id': 19, + 'list-objects-with-delimiter': 20, + 's3-multipart-uploads': 21, + 's3-multipart-uploads-big-ints': 22, + 'optimize-search-function': 23, + 'operation-function': 24, + 'custom-metadata': 25, + 'objects-prefixes': 26, + 'search-v2': 27, + 'object-bucket-name-sorting': 28, + 'create-prefixes': 29, + 'update-object-levels': 30, + 'objects-level-index': 31, + 'backward-compatible-index-on-objects': 32, + 'backward-compatible-index-on-prefixes': 33, + 'optimize-search-function-v1': 34, + 'add-insert-trigger-prefixes': 35, + 'optimise-existing-functions': 36, + 'add-bucket-name-length-trigger': 37, + 'iceberg-catalog-flag-on-buckets': 38, + 'add-search-v2-sort-support': 39, + 
'fix-prefix-race-conditions-optimized': 40, + 'add-object-level-update-trigger': 41, + 'rollback-prefix-triggers': 42, + 'fix-object-level': 43, + 'vector-bucket-type': 44, + 'vector-buckets': 45, + 'buckets-objects-grants': 46, + 'iceberg-table-metadata': 47, + 'iceberg-catalog-ids': 48, + 'buckets-objects-grants-postgres': 49, + 'search-v2-optimised': 50, + 'index-backward-compatible-search': 51, + 'drop-not-used-indexes-and-functions': 52, + 'drop-index-lower-name': 53, + 'drop-index-object-level': 54, + 'prevent-direct-deletes': 55, + 'fix-optimized-search-function': 56, + 's3-multipart-uploads-metadata': 57, +} diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts index e8e0a98d2..04237537b 100644 --- a/src/test/rls.test.ts +++ b/src/test/rls.test.ts @@ -17,6 +17,8 @@ import app from '../app' import { getConfig } from '../config' import { Storage } from '../storage' import { checkBucketExists } from './common' +import * as tus from 'tus-js-client' +import { DetailedError } from 'tus-js-client' interface Policy { name: string @@ -39,6 +41,7 @@ interface TestCaseAssert { operation: | 'upload' | 'upload.upsert' + | 'upload.tus' | 'bucket.create' | 'bucket.get' | 'bucket.list' @@ -257,8 +260,7 @@ describe('RLS policies', () => { } if (assert.error) { - const body = await response.json() - + const body = response.json() expect(body.message).toBe(assert.error) } } finally { @@ -316,6 +318,8 @@ async function runOperation( return uploadFile(bucket, objectName, jwt, false, userMetadata, mimeType, contentLength) case 'upload.upsert': return uploadFile(bucket, objectName, jwt, true, userMetadata, mimeType, contentLength) + case 'upload.tus': + return tusUploadFile(bucket, objectName, jwt, userMetadata, mimeType, contentLength) case 'bucket.list': return appInstance.inject({ method: 'GET', @@ -501,3 +505,68 @@ async function uploadFile( payload: form, }) } + +async function tusUploadFile( + bucket: string, + objectName: string, + jwt: string, + userMetadata?: Record, 
+ mimeType?: string, + contentLength?: number +) { + if (!appInstance.server.listening) { + await appInstance.listen({ port: 0 }) + } + + const addressInfo = appInstance.server.address() + if (!addressInfo || typeof addressInfo === 'string') { + throw new Error('Unable to resolve local server address') + } + + const localServerAddress = `http://127.0.0.1:${addressInfo.port}` + + const file = fs.createReadStream(path.resolve(__dirname, 'assets', 'sadcat.jpg')) + + let statusCode = 200 + let message = '' + + try { + await new Promise((resolve, reject) => { + const upload = new tus.Upload(file, { + endpoint: `${localServerAddress}/upload/resumable`, + uploadSize: contentLength || undefined, + onShouldRetry: () => false, + uploadDataDuringCreation: false, + headers: { + authorization: `Bearer ${jwt}`, + }, + metadata: { + bucketName: bucket, + objectName: objectName, + contentType: mimeType || 'application/octet-stream', + cacheControl: '3600', + ...(userMetadata ? { metadata: JSON.stringify(userMetadata) } : {}), + }, + onError: function (error) { + console.log('Failed because: ' + error) + reject(error) + }, + onSuccess: () => { + resolve(true) + }, + }) + + upload.start() + }) + } catch (e) { + if (e instanceof DetailedError) { + statusCode = e.originalResponse.getStatus() + message = e.originalResponse.getBody() + } else { + throw e + } + } + + const body = message ? 
{ message } : {} + return { statusCode, body: JSON.stringify(body), json: () => body } +} diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index 4f1fe0abe..300ecf7d7 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -508,6 +508,16 @@ tests: status: 400 error: 'new row violates row-level security policy' + + - operation: upload.tus + objectName: 'test_file_tus.jpg' + userMetadata: + department: 'engineering' + status: 200 + + - operation: upload.tus + status: 403 + error: 'new row violates row-level security policy' + - description: 'Will only upload image files based on mimetype' policies: - insert_only_images @@ -523,6 +533,17 @@ tests: status: 400 error: 'new row violates row-level security policy' + + - operation: upload.tus + objectName: 'test_image_tus.jpg' + mimeType: 'image/jpeg' + status: 200 + + - operation: upload.tus + objectName: 'test_file_tus.txt' + mimeType: 'text/plain' + status: 403 + error: 'new row violates row-level security policy' + - description: 'Will only upload files under size limit based on contentLength' policies: - insert_max_size_limit @@ -536,3 +557,13 @@ tests: contentLength: 200000 status: 400 error: 'new row violates row-level security policy' + + - operation: upload.tus + objectName: 'small_file_tus.jpg' + status: 200 + + - operation: upload.tus + objectName: 'large_file_tus.jpg' + contentLength: 200000 + status: 403 + error: 'new row violates row-level security policy' From ca2807792bd224be7382e498d1270af4524f584c Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Wed, 25 Feb 2026 15:13:47 -0600 Subject: [PATCH 09/22] fix: change userMetadata back to optional --- src/storage/uploader.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts index 3a36d63cb..7c5a4f4ed 100644 --- a/src/storage/uploader.ts +++ b/src/storage/uploader.ts @@ -27,7 +27,7 @@ export interface UploadRequest { bucketId: string objectName: string file: FileUpload - 
userMetadata: Record | undefined + userMetadata?: Record owner?: string isUpsert?: boolean uploadType?: 'standard' | 's3' | 'resumable' From 5905794dd84a720a926f3ddb0d7dc60a324df95d Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Wed, 25 Feb 2026 15:16:02 -0600 Subject: [PATCH 10/22] fix: add x-metadata header --- src/http/routes/object/getSignedUploadURL.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts index bc0d5ada2..15b31492e 100644 --- a/src/http/routes/object/getSignedUploadURL.ts +++ b/src/http/routes/object/getSignedUploadURL.ts @@ -21,6 +21,7 @@ const getSignedUploadURLHeadersSchema = { type: 'object', properties: { 'x-upsert': { type: 'string' }, + 'x-metadata': { type: 'string' }, 'content-type': { type: 'string' }, 'content-length': { type: 'string' }, authorization: { type: 'string' }, From 7986573c28bd00909d2779971f56bd272387035a Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Wed, 25 Feb 2026 15:19:45 -0600 Subject: [PATCH 11/22] docs: add todo comment to validate parseUserMetadata --- src/http/routes/object/getSignedUploadURL.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts index 15b31492e..f03984e16 100644 --- a/src/http/routes/object/getSignedUploadURL.ts +++ b/src/http/routes/object/getSignedUploadURL.ts @@ -78,6 +78,8 @@ export default async function routes(fastify: FastifyInstance) { const customMd = request.headers['x-metadata'] if (typeof customMd === 'string') { + // TODO: parseUserMetadata casts to Record but values could be anything; + // validation should be added in a follow-up userMetadata = parseUserMetadata(customMd) } From 41c2c91cba6881e7409a478c20483e78af361ff8 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Wed, 25 Feb 2026 15:48:28 -0600 Subject: [PATCH 12/22] fix: add column guard for findMultipartUpload --- 
src/storage/database/knex.ts | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts index 3b9ef206b..65e7a3f81 100644 --- a/src/storage/database/knex.ts +++ b/src/storage/database/knex.ts @@ -926,6 +926,9 @@ export class StorageKnexDB implements Database { user_metadata: userMetadata, } + // TODO: move this guard into normalizeColumns once it is table-aware. + // metadata was added to s3_multipart_uploads in migration 57 but has existed on + // objects since much earlier, so a table-agnostic rule would incorrectly strip it. if ( !this.latestMigration || DBMigration[this.latestMigration] >= DBMigration['s3-multipart-uploads-metadata'] @@ -945,9 +948,20 @@ export class StorageKnexDB implements Database { async findMultipartUpload(uploadId: string, columns = 'id', options?: { forUpdate?: boolean }) { const multiPart = await this.runQuery('FindMultipartUpload', async (knex, signal) => { + // TODO: move this guard into normalizeColumns once it is table-aware. + // metadata was added to s3_multipart_uploads in migration 57 but has existed on + // objects since much earlier, so a table-agnostic rule would incorrectly strip it. + const hasMetadataColumn = + !this.latestMigration || + DBMigration[this.latestMigration] >= DBMigration['s3-multipart-uploads-metadata'] + + const cols = hasMetadataColumn + ? 
columns.split(',') + : columns.split(',').filter((col) => col.trim() !== 'metadata') + const query = knex .from('s3_multipart_uploads') - .select(columns.split(',')) + .select(cols) .where('id', uploadId) if (options?.forUpdate) { From 273f87b078dda86f0e23e9f383433d08beac7b69 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Wed, 25 Feb 2026 15:59:10 -0600 Subject: [PATCH 13/22] fix: copy object userMetadata --- src/storage/object.ts | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/storage/object.ts b/src/storage/object.ts index 0718bc79a..ca62211ce 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -333,12 +333,20 @@ export class ObjectStorage { ...(fileMetadata || {}), } + const baseUserMetadata = originObject.user_metadata || {} + const destinationUserMetadata = copyMetadata + ? baseUserMetadata + : { + ...baseUserMetadata, + ...(userMetadata || {}), + } + await this.uploader.canUpload({ bucketId: destinationBucket, objectName: destinationKey, owner, isUpsert: upsert, - userMetadata: userMetadata, + userMetadata: destinationUserMetadata, metadata: destinationMetadata, }) @@ -384,7 +392,7 @@ export class ObjectStorage { lastModified: copyResult.lastModified, eTag: copyResult.eTag, }, - user_metadata: copyMetadata ? 
originObject.user_metadata : userMetadata, + user_metadata: destinationUserMetadata, version: newVersion, }) From 40b2f07022680a1b734199c9e515e14843e89453 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Wed, 25 Feb 2026 16:13:56 -0600 Subject: [PATCH 14/22] chore: remove userMetadata now that type is optional --- src/test/scanner.test.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/test/scanner.test.ts b/src/test/scanner.test.ts index d4d81f94d..db2c5a5ee 100644 --- a/src/test/scanner.test.ts +++ b/src/test/scanner.test.ts @@ -24,7 +24,6 @@ describe('ObjectScanner', () => { bucketId: bucket.id, objectName: randomUUID() + `-test-${i}.text`, uploadType: 'standard', - userMetadata: {}, file: { body: Readable.from(Buffer.from('test')), mimeType: 'text/plain', @@ -91,7 +90,6 @@ describe('ObjectScanner', () => { bucketId: bucket.id, objectName: randomUUID() + `-test-${i}.text`, uploadType: 'standard', - userMetadata: {}, file: { body: Readable.from(Buffer.from('test')), mimeType: 'text/plain', From 3ac21b5fcd94a111016b55021793d19d9b8bcb9c Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Wed, 25 Feb 2026 16:31:04 -0600 Subject: [PATCH 15/22] fix: call canUpload before shouldAllowPartUpload --- src/storage/protocols/s3/s3-handler.ts | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts index d6de43f75..42172c7da 100644 --- a/src/storage/protocols/s3/s3-handler.ts +++ b/src/storage/protocols/s3/s3-handler.ts @@ -586,17 +586,21 @@ export class S3ProtocolHandler { const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) - const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize) + const multipartData = await this.storage.db + .asSuperUser() + .findMultipartUpload(UploadId, 'version,user_metadata,metadata') await uploader.canUpload({ bucketId: Bucket as string, objectName: Key as 
string, owner: this.owner, isUpsert: true, - userMetadata: multipart.user_metadata || undefined, - metadata: multipart.metadata || undefined, + userMetadata: multipartData.user_metadata || undefined, + metadata: multipartData.metadata || undefined, }) + const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize) + if (signal?.aborted) { throw ERRORS.AbortedTerminate('UploadPart aborted') } @@ -1257,17 +1261,21 @@ export class S3ProtocolHandler { destinationBucket?.file_size_limit ) - const multipart = await this.shouldAllowPartUpload(UploadId, Number(copySize), maxFileSize) + const multipartData = await this.storage.db + .asSuperUser() + .findMultipartUpload(UploadId, 'version,user_metadata,metadata') await uploader.canUpload({ bucketId: Bucket, objectName: Key, owner: this.owner, isUpsert: true, - userMetadata: multipart.user_metadata || undefined, - metadata: multipart.metadata || undefined, + userMetadata: multipartData.user_metadata || undefined, + metadata: multipartData.metadata || undefined, }) + const multipart = await this.shouldAllowPartUpload(UploadId, Number(copySize), maxFileSize) + const uploadPart = await this.storage.backend.uploadPartCopy( storageS3Bucket, this.storage.location.getKeyLocation({ From a142dd58c2e6245e8a77c192171410bf087a6659 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Thu, 26 Feb 2026 12:38:14 -0600 Subject: [PATCH 16/22] feat: add more RLS tests --- src/test/rls.test.ts | 37 +++++++++++++++++++++++++++++++++++-- src/test/rls_tests.yaml | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 2 deletions(-) diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts index 04237537b..4dd83d8a3 100644 --- a/src/test/rls.test.ts +++ b/src/test/rls.test.ts @@ -42,6 +42,7 @@ interface TestCaseAssert { | 'upload' | 'upload.upsert' | 'upload.tus' + | 'upload.signed' | 'bucket.create' | 'bucket.get' | 'bucket.list' @@ -61,6 +62,8 @@ interface TestCaseAssert { userMetadata?: Record 
mimeType?: string contentLength?: number + copyMetadata?: boolean + destinationObjectName?: string status: number error?: string } @@ -242,6 +245,8 @@ describe('RLS policies', () => { userMetadata: assert.userMetadata, mimeType: assert.mimeType, contentLength: assert.contentLength, + copyMetadata: assert.copyMetadata, + destinationObjectName: assert.destinationObjectName, }) console.log( @@ -309,9 +314,11 @@ async function runOperation( userMetadata?: Record mimeType?: string contentLength?: number + copyMetadata?: boolean + destinationObjectName?: string } ) { - const { jwt, bucket, objectName, userMetadata, mimeType, contentLength } = options + const { jwt, bucket, objectName, userMetadata, mimeType, contentLength, copyMetadata, destinationObjectName } = options switch (operation) { case 'upload': @@ -320,6 +327,8 @@ async function runOperation( return uploadFile(bucket, objectName, jwt, true, userMetadata, mimeType, contentLength) case 'upload.tus': return tusUploadFile(bucket, objectName, jwt, userMetadata, mimeType, contentLength) + case 'upload.signed': + return signUploadUrl(bucket, objectName, jwt, userMetadata) case 'bucket.list': return appInstance.inject({ method: 'GET', @@ -417,11 +426,15 @@ async function runOperation( url: `/object/copy`, headers: { authorization: `Bearer ${jwt}`, + ...(userMetadata + ? { 'x-metadata': Buffer.from(JSON.stringify(userMetadata)).toString('base64') } + : {}), }, payload: { bucketId: bucket, sourceKey: objectName, - destinationKey: 'copied_' + objectName, + destinationKey: destinationObjectName ?? 'copied_' + objectName, + copyMetadata: copyMetadata ?? true, }, }) default: @@ -570,3 +583,23 @@ async function tusUploadFile( const body = message ? { message } : {} return { statusCode, body: JSON.stringify(body), json: () => body } } + +async function signUploadUrl( + bucket: string, + objectName: string, + jwt: string, + userMetadata?: Record +) { + const metadata = userMetadata + ? 
Buffer.from(JSON.stringify(userMetadata)).toString('base64') + : undefined + + return appInstance.inject({ + method: 'POST', + url: `/object/upload/sign/${bucket}/${objectName}`, + headers: { + authorization: `Bearer ${jwt}`, + ...(metadata ? { 'x-metadata': metadata } : {}), + }, + }) +} diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index 300ecf7d7..762cc094e 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -497,6 +497,7 @@ tests: - description: 'Will only upload files with correct user metadata' policies: - insert_with_metadata_check + - read_only_all_objects asserts: - operation: upload objectName: 'test_file.jpg' @@ -518,6 +519,46 @@ tests: status: 403 error: 'new row violates row-level security policy' + - operation: upload + objectName: 'source_copy_meta.jpg' + userMetadata: + department: 'engineering' + status: 200 + + - operation: object.copy + objectName: 'source_copy_meta.jpg' + destinationObjectName: 'copied_source_copy_meta_1.jpg' + copyMetadata: false + status: 200 + + - operation: object.copy + objectName: 'source_copy_meta.jpg' + destinationObjectName: 'copied_source_copy_meta_2.jpg' + copyMetadata: false + userMetadata: + department: 'engineering' + status: 200 + + - operation: object.copy + objectName: 'source_copy_meta.jpg' + destinationObjectName: 'copied_source_copy_meta_3.jpg' + copyMetadata: false + userMetadata: + department: 'marketing' + status: 400 + error: 'new row violates row-level security policy' + + - operation: upload.signed + objectName: 'signed_file.jpg' + userMetadata: + department: 'engineering' + status: 200 + + - operation: upload.signed + objectName: 'signed_file_no_meta.jpg' + status: 400 + error: 'new row violates row-level security policy' + - description: 'Will only upload image files based on mimetype' policies: - insert_only_images From 738bbea057862fafa8b55a10303933319ecaaa5e Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Thu, 26 Feb 2026 17:12:13 -0600 Subject: [PATCH 17/22] 
feat: add multipart upload operation and tests for S3 uploads --- src/test/rls.test.ts | 119 ++++++++++++++++++++++++++++++++++++---- src/test/rls_tests.yaml | 10 ++++ 2 files changed, 119 insertions(+), 10 deletions(-) diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts index 4dd83d8a3..a124be9d4 100644 --- a/src/test/rls.test.ts +++ b/src/test/rls.test.ts @@ -1,4 +1,11 @@ -import { CreateBucketCommand, S3Client } from '@aws-sdk/client-s3' +import { + CompleteMultipartUploadCommand, + CreateBucketCommand, + CreateMultipartUploadCommand, + S3Client, + S3ServiceException, + UploadPartCommand, +} from '@aws-sdk/client-s3' import { signJWT } from '@internal/auth' import { wait } from '@internal/concurrency' import { getPostgresConnection, getServiceKeyUser } from '@internal/database' @@ -43,6 +50,7 @@ interface TestCaseAssert { | 'upload.upsert' | 'upload.tus' | 'upload.signed' + | 'upload.s3.multipart' | 'bucket.create' | 'bucket.get' | 'bucket.list' @@ -77,8 +85,16 @@ const testSpec = yaml.load( fs.readFileSync(path.resolve(__dirname, 'rls_tests.yaml'), 'utf8') ) as RlsTestSpec -const { serviceKeyAsync, tenantId, jwtSecret, databaseURL, storageS3Bucket, storageBackendType } = - getConfig() +const { + serviceKeyAsync, + anonKeyAsync, + tenantId, + jwtSecret, + databaseURL, + storageS3Bucket, + storageBackendType, + storageS3Region, +} = getConfig() const backend = createStorageBackend(storageBackendType) const client = backend.client let appInstance: FastifyInstance @@ -318,7 +334,16 @@ async function runOperation( destinationObjectName?: string } ) { - const { jwt, bucket, objectName, userMetadata, mimeType, contentLength, copyMetadata, destinationObjectName } = options + const { + jwt, + bucket, + objectName, + userMetadata, + mimeType, + contentLength, + copyMetadata, + destinationObjectName, + } = options switch (operation) { case 'upload': @@ -329,6 +354,8 @@ async function runOperation( return tusUploadFile(bucket, objectName, jwt, userMetadata, mimeType, 
contentLength) case 'upload.signed': return signUploadUrl(bucket, objectName, jwt, userMetadata) + case 'upload.s3.multipart': + return s3MultipartUpload(bucket, objectName, jwt, userMetadata) case 'bucket.list': return appInstance.inject({ method: 'GET', @@ -572,12 +599,10 @@ async function tusUploadFile( upload.start() }) } catch (e) { - if (e instanceof DetailedError) { - statusCode = e.originalResponse.getStatus() - message = e.originalResponse.getBody() - } else { - throw e - } + if (!(e instanceof DetailedError)) throw e + + statusCode = e.originalResponse.getStatus() + message = e.originalResponse.getBody() } const body = message ? { message } : {} @@ -603,3 +628,77 @@ async function signUploadUrl( }, }) } + +async function s3MultipartUpload( + bucket: string, + objectName: string, + jwt: string, + userMetadata?: Record +) { + if (!appInstance.server.listening) { + await appInstance.listen({ port: 0 }) + } + + const listener = appInstance.server.address() as { port: number } + const anonKey = await anonKeyAsync + const s3Client = new S3Client({ + endpoint: `http://127.0.0.1:${listener.port}/s3`, + forcePathStyle: true, + region: storageS3Region, + credentials: { + accessKeyId: tenantId, + secretAccessKey: anonKey, + sessionToken: jwt, + }, + }) + + let statusCode = 200 + let message = '' + + try { + const s3Metadata = userMetadata + ? Object.fromEntries(Object.entries(userMetadata).map(([k, v]) => [k, String(v)])) + : undefined + + const createResp = await s3Client.send( + new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: objectName, + ContentType: 'image/jpg', + ...(s3Metadata ? 
{ Metadata: s3Metadata } : {}), + }) + ) + + const data = Buffer.alloc(5 * 1024) + const partResp = await s3Client.send( + new UploadPartCommand({ + Bucket: bucket, + Key: objectName, + UploadId: createResp.UploadId, + PartNumber: 1, + Body: data, + ContentLength: data.length, + }) + ) + + await s3Client.send( + new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: objectName, + UploadId: createResp.UploadId, + MultipartUpload: { Parts: [{ PartNumber: 1, ETag: partResp.ETag }] }, + }) + ) + + } catch (e: unknown) { + if (!(e instanceof S3ServiceException)) throw e + + statusCode = e.$metadata.httpStatusCode ?? 400 + message = e.message + + } finally { + s3Client.destroy() + } + const body = message ? { message } : {} + return { statusCode, body: JSON.stringify(body), json: () => body } +} diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index 762cc094e..a73170411 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -559,6 +559,16 @@ tests: status: 400 error: 'new row violates row-level security policy' + - operation: upload.s3.multipart + objectName: 's3_multi_file.jpg' + userMetadata: + department: 'engineering' + status: 200 + + - operation: upload.s3.multipart + objectName: 's3_multi_file_no_meta.jpg' + status: 403 + - description: 'Will only upload image files based on mimetype' policies: - insert_only_images From d056d368157a357b643e882859bafbba90b14ffc Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Thu, 26 Feb 2026 17:55:27 -0600 Subject: [PATCH 18/22] test: add RLS upload tests to ensure in_progress_size is not mutated on failure --- src/test/s3-protocol.test.ts | 119 +++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/src/test/s3-protocol.test.ts b/src/test/s3-protocol.test.ts index f6b994e77..9b421111b 100644 --- a/src/test/s3-protocol.test.ts +++ b/src/test/s3-protocol.test.ts @@ -28,6 +28,9 @@ import { Upload } from '@aws-sdk/lib-storage' import { createPresignedPost } from 
'@aws-sdk/s3-presigned-post' import { getSignedUrl } from '@aws-sdk/s3-request-presigner' import { wait } from '@internal/concurrency' +import { StorageKnexDB } from '@storage/database' +import { Uploader } from '@storage/uploader' +import { ERRORS } from '@internal/errors' import axios from 'axios' import { createHash, createHmac, randomUUID } from 'crypto' import { FastifyInstance } from 'fastify' @@ -1067,6 +1070,78 @@ describe('S3 Protocol', () => { expect(resp.$metadata).toBeTruthy() }) + + it('does not mutate in_progress_size when canUpload (RLS) fails', async () => { + /* + Calling shouldAllowPartUpload mutates the in_progress_size so we have to ensure + canUpload is called beforehand or else it can cause issues for valid uploads. + + This test sets the fileSizeLimit to 10kb and each part at 5kb. It simulates + first request successful, second failed, third passes. If the in_progress_size + was mutated on the second request it would be at 10kb causing the third to fail. + */ + const bucketName = await createBucket(client) + const key = 'rls-ordering-test.jpg' + const partSize = 1024 * 5 + + mergeConfig({ uploadFileSizeLimit: partSize * 2 }) + + const createResp = await client.send( + new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + ContentType: 'image/jpg', + }) + ) + expect(createResp.UploadId).toBeTruthy() + const uploadId = createResp.UploadId + + const part1Resp = await client.send( + new UploadPartCommand({ + Bucket: bucketName, + Key: key, + UploadId: uploadId, + PartNumber: 1, + Body: Buffer.alloc(partSize), + ContentLength: partSize, + }) + ) + expect(part1Resp.ETag).toBeTruthy() + + const canUploadSpy = jest + .spyOn(Uploader.prototype, 'canUpload') + .mockRejectedValueOnce(ERRORS.AccessDenied('upload')) + + try { + await client.send( + new UploadPartCommand({ + Bucket: bucketName, + Key: key, + UploadId: uploadId, + PartNumber: 2, + Body: Buffer.alloc(partSize), + ContentLength: partSize, + }) + ) + throw new Error('Should not 
reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + } finally { + canUploadSpy.mockRestore() + } + + const part2Resp = await client.send( + new UploadPartCommand({ + Bucket: bucketName, + Key: key, + UploadId: uploadId, + PartNumber: 2, + Body: Buffer.alloc(partSize), + ContentLength: partSize, + }) + ) + expect(part2Resp.ETag).toBeTruthy() + }) }) describe('GetObject', () => { @@ -2022,3 +2097,47 @@ describe('S3 Protocol', () => { }) }) }) + +describe('Migration compatibility', () => { + it('findMultipartUpload excludes metadata column when s3-multipart-uploads-metadata migration has not been applied', () => { + // Simulate a DB instance where migration 57 (s3-multipart-uploads-metadata) has NOT been applied + const mockDB = { + latestMigration: 'fix-optimized-search-function', // migration 56 + } as Partial + + // Access the column filtering logic directly by checking what columns would be selected + const { DBMigration } = require('@internal/database/migrations') + const latestMigration = mockDB.latestMigration as keyof typeof DBMigration + + const requestedColumns = 'id,version,user_metadata,metadata'.split(',') + let filteredColumns = requestedColumns + + if ( + latestMigration && + DBMigration[latestMigration] < DBMigration['s3-multipart-uploads-metadata'] + ) { + filteredColumns = filteredColumns.filter((col) => col.trim() !== 'metadata') + } + + expect(filteredColumns).toEqual(['id', 'version', 'user_metadata']) + expect(filteredColumns).not.toContain('metadata') + }) + + it('findMultipartUpload includes metadata column when s3-multipart-uploads-metadata migration has been applied', () => { + const { DBMigration } = require('@internal/database/migrations') + const latestMigration: keyof typeof DBMigration = 's3-multipart-uploads-metadata' + + const requestedColumns = 'id,version,user_metadata,metadata'.split(',') + let filteredColumns = requestedColumns + + if ( + latestMigration && + 
DBMigration[latestMigration] < DBMigration['s3-multipart-uploads-metadata'] + ) { + filteredColumns = filteredColumns.filter((col) => col.trim() !== 'metadata') + } + + expect(filteredColumns).toEqual(['id', 'version', 'user_metadata', 'metadata']) + expect(filteredColumns).toContain('metadata') + }) +}) From c3119d747840bbcf9054e9ffe0952170516e19a9 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Thu, 26 Feb 2026 18:27:54 -0600 Subject: [PATCH 19/22] fix: formatting --- src/http/routes/object/getSignedUploadURL.ts | 2 +- src/http/routes/tus/lifecycle.ts | 2 +- src/storage/database/knex.ts | 5 +- src/storage/object.ts | 2 +- src/test/rls.test.ts | 10 +- src/test/rls_tests.yaml | 106 +++++++++---------- src/test/s3-protocol.test.ts | 2 +- 7 files changed, 62 insertions(+), 67 deletions(-) diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts index f03984e16..0de648d0a 100644 --- a/src/http/routes/object/getSignedUploadURL.ts +++ b/src/http/routes/object/getSignedUploadURL.ts @@ -1,10 +1,10 @@ import { FastifyInstance } from 'fastify' import { FromSchema } from 'json-schema-to-ts' import { getConfig } from '../../../config' +import { parseUserMetadata } from '../../../storage/uploader' import { createDefaultSchema } from '../../routes-helper' import { AuthenticatedRequest } from '../../types' import { ROUTE_OPERATIONS } from '../operations' -import { parseUserMetadata } from '../../../storage/uploader' const { uploadSignedUrlExpirationTime } = getConfig() diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index 22b34e643..f28c274fa 100644 --- a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -136,7 +136,7 @@ export async function onIncomingRequest(rawReq: Request, id: string, datastore: userMetadata: customMd, metadata: { mimetype: contentType, - contentLength: contentLength, + contentLength, }, }) } diff --git a/src/storage/database/knex.ts 
b/src/storage/database/knex.ts index 65e7a3f81..439159b88 100644 --- a/src/storage/database/knex.ts +++ b/src/storage/database/knex.ts @@ -959,10 +959,7 @@ export class StorageKnexDB implements Database { ? columns.split(',') : columns.split(',').filter((col) => col.trim() !== 'metadata') - const query = knex - .from('s3_multipart_uploads') - .select(cols) - .where('id', uploadId) + const query = knex.from('s3_multipart_uploads').select(cols).where('id', uploadId) if (options?.forUpdate) { return query.abortOnSignal(signal).forUpdate().first() diff --git a/src/storage/object.ts b/src/storage/object.ts index ca62211ce..46125d538 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -17,7 +17,7 @@ import { ObjectUpdatedMetadata, } from './events' import { mustBeValidKey } from './limits' -import { fileUploadFromRequest, Uploader, UploadRequest, CanUploadMetadata} from './uploader' +import { CanUploadMetadata, fileUploadFromRequest, Uploader, UploadRequest } from './uploader' const { requestUrlLengthLimit } = getConfig() diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts index a124be9d4..15f246971 100644 --- a/src/test/rls.test.ts +++ b/src/test/rls.test.ts @@ -20,12 +20,12 @@ import yaml from 'js-yaml' import { Knex, knex } from 'knex' import Mustache from 'mustache' import path from 'path' +import * as tus from 'tus-js-client' +import { DetailedError } from 'tus-js-client' import app from '../app' import { getConfig } from '../config' import { Storage } from '../storage' import { checkBucketExists } from './common' -import * as tus from 'tus-js-client' -import { DetailedError } from 'tus-js-client' interface Policy { name: string @@ -582,12 +582,12 @@ async function tusUploadFile( }, metadata: { bucketName: bucket, - objectName: objectName, + objectName, contentType: mimeType || 'application/octet-stream', cacheControl: '3600', ...(userMetadata ? 
{ metadata: JSON.stringify(userMetadata) } : {}), }, - onError: function (error) { + onError(error) { console.log('Failed because: ' + error) reject(error) }, @@ -689,13 +689,11 @@ async function s3MultipartUpload( MultipartUpload: { Parts: [{ PartNumber: 1, ETag: partResp.ETag }] }, }) ) - } catch (e: unknown) { if (!(e instanceof S3ServiceException)) throw e statusCode = e.$metadata.httpStatusCode ?? 400 message = e.message - } finally { s3Client.destroy() } diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index a73170411..1dce98518 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -48,21 +48,21 @@ policies: content: "USING(owner = '{{uid}}')" - name: insert_with_metadata_check - tables: ['storage.objects'] - roles: ['authenticated'] - permissions: ['insert'] + tables: ["storage.objects"] + roles: ["authenticated"] + permissions: ["insert"] content: "WITH CHECK(user_metadata->>'department' = 'engineering')" - name: insert_only_images - tables: ['storage.objects'] - roles: ['authenticated'] - permissions: ['insert'] + tables: ["storage.objects"] + roles: ["authenticated"] + permissions: ["insert"] content: "WITH CHECK(metadata->>'mimetype' LIKE 'image/%')" - name: insert_max_size_limit - tables: ['storage.objects'] - roles: ['authenticated'] - permissions: ['insert'] + tables: ["storage.objects"] + roles: ["authenticated"] + permissions: ["insert"] content: "WITH CHECK((metadata->>'contentLength')::int <= 100000)" tests: @@ -492,129 +492,129 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket not found' + error: "Bucket not found" - - description: 'Will only upload files with correct user metadata' + - description: "Will only upload files with correct user metadata" policies: - insert_with_metadata_check - read_only_all_objects asserts: - operation: upload - objectName: 'test_file.jpg' + objectName: "test_file.jpg" userMetadata: - department: 'engineering' + department: "engineering" status: 200 - operation: upload 
status: 400 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" - operation: upload.tus - objectName: 'test_file_tus.jpg' + objectName: "test_file_tus.jpg" userMetadata: - department: 'engineering' + department: "engineering" status: 200 - operation: upload.tus status: 403 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" - operation: upload - objectName: 'source_copy_meta.jpg' + objectName: "source_copy_meta.jpg" userMetadata: - department: 'engineering' + department: "engineering" status: 200 - operation: object.copy - objectName: 'source_copy_meta.jpg' - destinationObjectName: 'copied_source_copy_meta_1.jpg' + objectName: "source_copy_meta.jpg" + destinationObjectName: "copied_source_copy_meta_1.jpg" copyMetadata: false status: 200 - operation: object.copy - objectName: 'source_copy_meta.jpg' - destinationObjectName: 'copied_source_copy_meta_2.jpg' + objectName: "source_copy_meta.jpg" + destinationObjectName: "copied_source_copy_meta_2.jpg" copyMetadata: false userMetadata: - department: 'engineering' + department: "engineering" status: 200 - operation: object.copy - objectName: 'source_copy_meta.jpg' - destinationObjectName: 'copied_source_copy_meta_3.jpg' + objectName: "source_copy_meta.jpg" + destinationObjectName: "copied_source_copy_meta_3.jpg" copyMetadata: false userMetadata: - department: 'marketing' + department: "marketing" status: 400 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" - operation: upload.signed - objectName: 'signed_file.jpg' + objectName: "signed_file.jpg" userMetadata: - department: 'engineering' + department: "engineering" status: 200 - operation: upload.signed - objectName: 'signed_file_no_meta.jpg' + objectName: "signed_file_no_meta.jpg" status: 400 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" - 
operation: upload.s3.multipart - objectName: 's3_multi_file.jpg' + objectName: "s3_multi_file.jpg" userMetadata: - department: 'engineering' + department: "engineering" status: 200 - operation: upload.s3.multipart - objectName: 's3_multi_file_no_meta.jpg' + objectName: "s3_multi_file_no_meta.jpg" status: 403 - - description: 'Will only upload image files based on mimetype' + - description: "Will only upload image files based on mimetype" policies: - insert_only_images asserts: - operation: upload - objectName: 'test_image.jpg' - mimeType: 'image/jpeg' + objectName: "test_image.jpg" + mimeType: "image/jpeg" status: 200 - operation: upload - objectName: 'test_file.txt' - mimeType: 'text/plain' + objectName: "test_file.txt" + mimeType: "text/plain" status: 400 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" - operation: upload.tus - objectName: 'test_image_tus.jpg' - mimeType: 'image/jpeg' + objectName: "test_image_tus.jpg" + mimeType: "image/jpeg" status: 200 - operation: upload.tus - objectName: 'test_file_tus.txt' - mimeType: 'text/plain' + objectName: "test_file_tus.txt" + mimeType: "text/plain" status: 403 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" - - description: 'Will only upload files under size limit based on contentLength' + - description: "Will only upload files under size limit based on contentLength" policies: - insert_max_size_limit asserts: - operation: upload - objectName: 'small_file.jpg' + objectName: "small_file.jpg" status: 200 - operation: upload - objectName: 'large_file.jpg' + objectName: "large_file.jpg" contentLength: 200000 status: 400 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" - operation: upload.tus - objectName: 'small_file_tus.jpg' + objectName: "small_file_tus.jpg" status: 200 - operation: upload.tus - objectName: 'large_file_tus.jpg' + objectName: 
"large_file_tus.jpg" contentLength: 200000 status: 403 - error: 'new row violates row-level security policy' + error: "new row violates row-level security policy" diff --git a/src/test/s3-protocol.test.ts b/src/test/s3-protocol.test.ts index 9b421111b..b39b8e0f6 100644 --- a/src/test/s3-protocol.test.ts +++ b/src/test/s3-protocol.test.ts @@ -28,9 +28,9 @@ import { Upload } from '@aws-sdk/lib-storage' import { createPresignedPost } from '@aws-sdk/s3-presigned-post' import { getSignedUrl } from '@aws-sdk/s3-request-presigner' import { wait } from '@internal/concurrency' +import { ERRORS } from '@internal/errors' import { StorageKnexDB } from '@storage/database' import { Uploader } from '@storage/uploader' -import { ERRORS } from '@internal/errors' import axios from 'axios' import { createHash, createHmac, randomUUID } from 'crypto' import { FastifyInstance } from 'fastify' From 56157e3c4a5f0a00fe5f1ca3209e7ffc14744917 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Thu, 26 Feb 2026 19:04:28 -0600 Subject: [PATCH 20/22] fix: incorrectly copying userMetadata --- src/storage/object.ts | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/storage/object.ts b/src/storage/object.ts index 46125d538..1529919e5 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -333,20 +333,14 @@ export class ObjectStorage { ...(fileMetadata || {}), } - const baseUserMetadata = originObject.user_metadata || {} - const destinationUserMetadata = copyMetadata - ? baseUserMetadata - : { - ...baseUserMetadata, - ...(userMetadata || {}), - } + const destinationUserMetadata = copyMetadata ? 
originObject.user_metadata : userMetadata await this.uploader.canUpload({ bucketId: destinationBucket, objectName: destinationKey, owner, isUpsert: upsert, - userMetadata: destinationUserMetadata, + userMetadata: destinationUserMetadata || undefined, metadata: destinationMetadata, }) From 8ae3a9a89b5bfc20ea03830e091b22277955b214 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Sat, 28 Feb 2026 08:40:52 -0600 Subject: [PATCH 21/22] test(s3-multipart): remove unit tests for integration tests --- src/test/s3-protocol.test.ts | 155 +++++++++++++++++++++++++++-------- 1 file changed, 120 insertions(+), 35 deletions(-) diff --git a/src/test/s3-protocol.test.ts b/src/test/s3-protocol.test.ts index b39b8e0f6..346bcf702 100644 --- a/src/test/s3-protocol.test.ts +++ b/src/test/s3-protocol.test.ts @@ -28,6 +28,8 @@ import { Upload } from '@aws-sdk/lib-storage' import { createPresignedPost } from '@aws-sdk/s3-presigned-post' import { getSignedUrl } from '@aws-sdk/s3-request-presigner' import { wait } from '@internal/concurrency' +import { getPostgresConnection, getServiceKeyUser, TenantConnection } from '@internal/database' +import { DBMigration } from '@internal/database/migrations' import { ERRORS } from '@internal/errors' import { StorageKnexDB } from '@storage/database' import { Uploader } from '@storage/uploader' @@ -2099,45 +2101,128 @@ describe('S3 Protocol', () => { }) describe('Migration compatibility', () => { - it('findMultipartUpload excludes metadata column when s3-multipart-uploads-metadata migration has not been applied', () => { - // Simulate a DB instance where migration 57 (s3-multipart-uploads-metadata) has NOT been applied - const mockDB = { - latestMigration: 'fix-optimized-search-function', // migration 56 - } as Partial - - // Access the column filtering logic directly by checking what columns would be selected - const { DBMigration } = require('@internal/database/migrations') - const latestMigration = mockDB.latestMigration as keyof typeof DBMigration 
- - const requestedColumns = 'id,version,user_metadata,metadata'.split(',') - let filteredColumns = requestedColumns - - if ( - latestMigration && - DBMigration[latestMigration] < DBMigration['s3-multipart-uploads-metadata'] - ) { - filteredColumns = filteredColumns.filter((col) => col.trim() !== 'metadata') - } + describe('integration', () => { + const { tenantId } = getConfig() + let connection: TenantConnection + let bucketId: string - expect(filteredColumns).toEqual(['id', 'version', 'user_metadata']) - expect(filteredColumns).not.toContain('metadata') - }) + beforeAll(async () => { + const adminUser = await getServiceKeyUser(tenantId) + connection = await getPostgresConnection({ + tenantId, + user: adminUser, + superUser: adminUser, + host: 'localhost', + }) - it('findMultipartUpload includes metadata column when s3-multipart-uploads-metadata migration has been applied', () => { - const { DBMigration } = require('@internal/database/migrations') - const latestMigration: keyof typeof DBMigration = 's3-multipart-uploads-metadata' + bucketId = randomUUID() + const db = new StorageKnexDB(connection, { tenantId, host: 'localhost' }) + await db.createBucket({ id: bucketId, name: `migration-test-${bucketId}`, public: false }) + }) - const requestedColumns = 'id,version,user_metadata,metadata'.split(',') - let filteredColumns = requestedColumns + afterAll(async () => { + const db = new StorageKnexDB(connection, { tenantId, host: 'localhost' }) + await db.deleteBucket(bucketId) + await connection.dispose() + }) - if ( - latestMigration && - DBMigration[latestMigration] < DBMigration['s3-multipart-uploads-metadata'] - ) { - filteredColumns = filteredColumns.filter((col) => col.trim() !== 'metadata') - } + const makeDB = (latestMigration?: keyof typeof DBMigration) => + new StorageKnexDB(connection, { tenantId, host: 'localhost', latestMigration }) - expect(filteredColumns).toEqual(['id', 'version', 'user_metadata', 'metadata']) - 
expect(filteredColumns).toContain('metadata') + describe('createMultipartUpload', () => { + it('does not store metadata when latestMigration is before s3-multipart-uploads-metadata', async () => { + const db = makeDB('fix-optimized-search-function') // migration 56 + const uploadId = randomUUID() + try { + const result = await db.createMultipartUpload( + uploadId, + bucketId, + 'test-pre-migration.txt', + randomUUID(), + 'sig', + undefined, + undefined, + { + cacheControl: 'no-cache', + contentLength: 0, + size: 0, + mimetype: 'text/plain', + eTag: 'abc', + } + ) + expect(result.metadata).toBeNull() + } finally { + await makeDB().deleteMultipartUpload(uploadId) + } + }) + + it('stores metadata when latestMigration is s3-multipart-uploads-metadata', async () => { + const db = makeDB('s3-multipart-uploads-metadata') // migration 57 + const uploadId = randomUUID() + const metadata = { + cacheControl: 'no-cache', + contentLength: 0, + size: 0, + mimetype: 'text/plain', + eTag: 'abc', + } + try { + const result = await db.createMultipartUpload( + uploadId, + bucketId, + 'test-post-migration.txt', + randomUUID(), + 'sig', + undefined, + undefined, + metadata + ) + expect(result.metadata).toEqual(metadata) + } finally { + await makeDB().deleteMultipartUpload(uploadId) + } + }) + }) + + describe('findMultipartUpload', () => { + let uploadId: string + + beforeAll(async () => { + uploadId = randomUUID() + const db = makeDB('s3-multipart-uploads-metadata') + await db.createMultipartUpload( + uploadId, + bucketId, + 'test-find.txt', + randomUUID(), + 'sig', + undefined, + undefined, + { + cacheControl: 'no-cache', + contentLength: 0, + size: 0, + mimetype: 'text/plain', + eTag: 'abc', + } + ) + }) + + afterAll(async () => { + await makeDB().deleteMultipartUpload(uploadId) + }) + + it('excludes metadata from result when latestMigration is before s3-multipart-uploads-metadata', async () => { + const db = makeDB('fix-optimized-search-function') // migration 56 + const result = 
await db.findMultipartUpload(uploadId, 'id,version,metadata') + expect(result).not.toHaveProperty('metadata') + }) + + it('includes metadata in result when latestMigration is s3-multipart-uploads-metadata', async () => { + const db = makeDB('s3-multipart-uploads-metadata') // migration 57 + const result = await db.findMultipartUpload(uploadId, 'id,version,metadata') + expect(result).toHaveProperty('metadata') + }) + }) }) }) From a473d9b9559e9188f49dc18e6e0d1674ee099480 Mon Sep 17 00:00:00 2001 From: Tyler Hillery Date: Sat, 28 Feb 2026 09:03:00 -0600 Subject: [PATCH 22/22] fix: rls copy metadata test was wrong --- src/test/rls_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index 1dce98518..2cffb64f3 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -528,7 +528,7 @@ tests: - operation: object.copy objectName: "source_copy_meta.jpg" destinationObjectName: "copied_source_copy_meta_1.jpg" - copyMetadata: false + copyMetadata: true status: 200 - operation: object.copy