diff --git a/src/config.ts b/src/config.ts
index ed95ac9c..47e3e987 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -1,6 +1,6 @@
 import dotenv from 'dotenv'

-type StorageBackendType = 'file' | 's3'
+export type StorageBackendType = 'file' | 's3'

 type StorageConfigType = {
   version: string
@@ -12,7 +12,7 @@ type StorageConfigType = {
   encryptionKey: string
   fileSizeLimit: number
   fileStoragePath?: string
-  globalS3Protocol?: 'http' | 'https' | string
+  globalS3Protocol: 'http' | 'https'
   globalS3MaxSockets?: number
   globalS3Bucket: string
   globalS3Endpoint?: string
@@ -111,7 +111,9 @@ export function getConfig(): StorageConfigType {
     fileSizeLimit: Number(getConfigFromEnv('FILE_SIZE_LIMIT')),
     fileStoragePath: getOptionalConfigFromEnv('FILE_STORAGE_BACKEND_PATH'),
     globalS3MaxSockets: parseInt(getOptionalConfigFromEnv('GLOBAL_S3_MAX_SOCKETS') || '200', 10),
-    globalS3Protocol: getOptionalConfigFromEnv('GLOBAL_S3_PROTOCOL') || 'https',
+    globalS3Protocol: (getOptionalConfigFromEnv('GLOBAL_S3_PROTOCOL') || 'https') as
+      | 'http'
+      | 'https',
     globalS3Bucket: getConfigFromEnv('GLOBAL_S3_BUCKET'),
     globalS3Endpoint: getOptionalConfigFromEnv('GLOBAL_S3_ENDPOINT'),
     globalS3ForcePathStyle: getOptionalConfigFromEnv('GLOBAL_S3_FORCE_PATH_STYLE') === 'true',
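Note on the hunk above: `getOptionalConfigFromEnv` still returns a plain string, so the `as 'http' | 'https'` assertion narrows the type without rejecting a bad `GLOBAL_S3_PROTOCOL` value at runtime. A minimal sketch of a stricter alternative, assuming one wanted to fail fast at boot (`parseS3Protocol` is a hypothetical helper, not part of this PR):

```ts
// Hypothetical: narrow the env value by validation instead of assertion.
function parseS3Protocol(value: string | undefined): 'http' | 'https' {
  const protocol = value || 'https'
  if (protocol !== 'http' && protocol !== 'https') {
    throw new Error(`GLOBAL_S3_PROTOCOL must be "http" or "https", got "${protocol}"`)
  }
  return protocol
}

// Usage inside getConfig() would then be:
// globalS3Protocol: parseS3Protocol(getOptionalConfigFromEnv('GLOBAL_S3_PROTOCOL')),
```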
diff --git a/src/database/connection.ts b/src/database/connection.ts
index cac134d5..aada5c2d 100644
--- a/src/database/connection.ts
+++ b/src/database/connection.ts
@@ -85,24 +85,22 @@ export class TenantConnection {
       acquireConnectionTimeout: databaseConnectionTimeout,
     })

-    DbActivePool.inc({ tenant_id: options.tenantId, is_external: isExternalPool.toString() })
+    DbActivePool.inc({ is_external: isExternalPool.toString() })

     knexPool.client.pool.on('createSuccess', () => {
       DbActiveConnection.inc({
-        tenant_id: options.tenantId,
         is_external: isExternalPool.toString(),
       })
     })

     knexPool.client.pool.on('destroySuccess', () => {
       DbActiveConnection.dec({
-        tenant_id: options.tenantId,
         is_external: isExternalPool.toString(),
       })
     })

     knexPool.client.pool.on('poolDestroySuccess', () => {
-      DbActivePool.dec({ tenant_id: options.tenantId, is_external: isExternalPool.toString() })
+      DbActivePool.dec({ is_external: isExternalPool.toString() })
     })

     if (!isExternalPool) {
diff --git a/src/http/plugins/metrics.ts b/src/http/plugins/metrics.ts
index 0b8145c4..69841ca5 100644
--- a/src/http/plugins/metrics.ts
+++ b/src/http/plugins/metrics.ts
@@ -1,9 +1,9 @@
 import fastifyPlugin from 'fastify-plugin'
-import { MetricsRegistrar, RequestErrors } from '../../monitoring/metrics'
+import { MetricsRegistrar } from '../../monitoring/metrics'
 import fastifyMetrics from 'fastify-metrics'
 import { getConfig } from '../../config'

-const { region, enableDefaultMetrics } = getConfig()
+const { region, enableDefaultMetrics, metricsRoutes } = getConfig()

 interface MetricsOptions {
   enabledEndpoint?: boolean
@@ -22,7 +22,7 @@ export const metrics = ({ enabledEndpoint }: MetricsOptions) =>
       },
     },
     routeMetrics: {
-      enabled: enableDefaultMetrics,
+      enabled: metricsRoutes,
       overrides: {
         summary: {
           name: 'storage_api_http_request_summary_seconds',
@@ -33,26 +33,6 @@
       },
       registeredRoutesOnly: true,
       groupStatusCodes: true,
-      customLabels: {
-        tenant_id: (req) => {
-          return req.tenantId
-        },
-      },
     },
   })
-
-  // Errors
-  fastify.addHook('onResponse', async (request, reply) => {
-    const error = (reply.raw as any).executionError || reply.executionError
-
-    if (error) {
-      RequestErrors.inc({
-        name: error.name || error.constructor.name,
-        tenant_id: request.tenantId,
-        path: request.routerPath,
-        method: request.routerMethod,
-        status: reply.statusCode,
-      })
-    }
-  })
 })
diff --git a/src/http/plugins/storage.ts b/src/http/plugins/storage.ts
index 6c27665a..abb69ac5 100644
--- a/src/http/plugins/storage.ts
+++ b/src/http/plugins/storage.ts
@@ -2,6 +2,7 @@ import fastifyPlugin from 'fastify-plugin'
 import { StorageBackendAdapter, createStorageBackend } from '../../storage/backend'
 import { Storage } from '../../storage'
 import { StorageKnexDB } from '../../storage/database'
+import { getConfig } from '../../config'

 declare module 'fastify' {
   interface FastifyRequest {
@@ -10,8 +11,10 @@ declare module 'fastify' {
   }
 }

+const { storageBackendType, metricsDbPerformance } = getConfig()
+
 export const storage = fastifyPlugin(async (fastify) => {
-  const storageBackend = createStorageBackend()
+  const storageBackend = createStorageBackend(storageBackendType)

   fastify.decorateRequest('storage', undefined)
   fastify.addHook('preHandler', async (request) => {
@@ -19,6 +22,7 @@ export const storage = fastifyPlugin(async (fastify) => {
       tenantId: request.tenantId,
       host: request.headers['x-forwarded-host'] as string,
       reqId: request.id,
+      enableMetrics: metricsDbPerformance,
     })
     request.backend = storageBackend
     request.storage = new Storage(storageBackend, database)
diff --git a/src/http/routes/tus/s3-store.ts b/src/http/routes/tus/s3-store.ts
index 40712055..7f16caf2 100644
--- a/src/http/routes/tus/s3-store.ts
+++ b/src/http/routes/tus/s3-store.ts
@@ -53,11 +53,8 @@ export class S3Store extends BaseS3Store {
     const timer = S3UploadPart.startTimer()

     const result = await super.uploadPart(metadata, readStream, partNumber)

-    const resource = UploadId.fromString(metadata.file.id)
-
-    timer({
-      tenant_id: resource.tenant,
-    })
+    timer()

     return result
   }
diff --git a/src/monitoring/metrics.ts b/src/monitoring/metrics.ts
index ac090985..52f87fa8 100644
--- a/src/monitoring/metrics.ts
+++ b/src/monitoring/metrics.ts
@@ -6,71 +6,65 @@ export const MetricsRegistrar = new Registry()
 export const FileUploadStarted = new client.Gauge({
   name: 'storage_api_upload_started',
   help: 'Upload started',
-  labelNames: ['tenant_id', 'region', 'is_multipart'],
+  labelNames: ['region', 'is_multipart'],
 })

 export const FileUploadedSuccess = new client.Gauge({
   name: 'storage_api_upload_success',
   help: 'Successful uploads',
-  labelNames: ['tenant_id', 'region', 'is_multipart'],
+  labelNames: ['region', 'is_multipart'],
 })

 export const DbQueryPerformance = new client.Histogram({
   name: 'storage_api_database_query_performance',
   help: 'Database query performance',
-  labelNames: ['tenant_id', 'region', 'name'],
-})
-
-export const RequestErrors = new client.Gauge({
-  name: 'storage_api_request_errors',
-  labelNames: ['tenant_id', 'region', 'method', 'path', 'status', 'name'],
-  help: 'Response Errors',
+  labelNames: ['region', 'name'],
 })

 export const QueueJobSchedulingTime = new client.Histogram({
   name: 'storage_api_queue_job_scheduled_time',
   help: 'Time taken to schedule a job in the queue',
-  labelNames: ['region', 'name', 'tenant_id'],
+  labelNames: ['region', 'name'],
 })

 export const QueueJobScheduled = new client.Gauge({
   name: 'storage_api_queue_job_scheduled',
   help: 'Current number of pending messages in the queue',
-  labelNames: ['region', 'name', 'tenant_id'],
+  labelNames: ['region', 'name'],
 })

 export const QueueJobCompleted = new client.Gauge({
   name: 'storage_api_queue_job_completed',
   help: 'Current number of processed messages in the queue',
-  labelNames: ['tenant_id', 'region', 'name'],
+  labelNames: ['region', 'name'],
 })

 export const QueueJobRetryFailed = new client.Gauge({
   name: 'storage_api_queue_job_retry_failed',
   help: 'Current number of failed attempts messages in the queue',
-  labelNames: ['tenant_id', 'region', 'name'],
+  labelNames: ['region', 'name'],
 })

 export const QueueJobError = new client.Gauge({
   name: 'storage_api_queue_job_error',
   help: 'Current number of errored messages in the queue',
-  labelNames: ['tenant_id', 'region', 'name'],
+  labelNames: ['region', 'name'],
 })

 export const S3UploadPart = new client.Histogram({
   name: 'storage_api_s3_upload_part',
   help: 'S3 upload part performance',
-  labelNames: ['tenant_id', 'region'],
+  labelNames: ['region'],
 })

 export const DbActivePool = new client.Gauge({
   name: 'storage_api_db_pool',
   help: 'Number of database pools created',
-  labelNames: ['tenant_id', 'region', 'is_external'],
+  labelNames: ['region', 'is_external'],
 })

 export const DbActiveConnection = new client.Gauge({
   name: 'storage_api_db_connections',
   help: 'Number of database connections',
-  labelNames: ['tenant_id', 'region', 'is_external'],
+  labelNames: ['region', 'is_external'],
 })
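For context on why `tenant_id` is dropped throughout this file: in prom-client, every distinct combination of label values materializes its own time series, so a per-tenant label multiplies the series count of each metric by the number of active tenants. A small self-contained sketch of the mechanism (`example_upload_success` is an illustrative metric name, not one from this codebase):

```ts
import client, { Registry } from 'prom-client'

const registry = new Registry()

const uploads = new client.Gauge({
  name: 'example_upload_success',
  help: 'Successful uploads',
  labelNames: ['region', 'is_multipart'],
  registers: [registry],
})

// Two label combinations -> two time series. With an extra tenant_id
// label, the series count would instead grow linearly with tenants.
uploads.inc({ region: 'us-east-1', is_multipart: 'true' })
uploads.inc({ region: 'us-east-1', is_multipart: 'false' })
```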
diff --git a/src/queue/events/base-event.ts b/src/queue/events/base-event.ts
index e5c82dcc..0643bc10 100644
--- a/src/queue/events/base-event.ts
+++ b/src/queue/events/base-event.ts
@@ -4,7 +4,7 @@ import { getServiceKeyUser } from '../../database/tenant'
 import { getPostgresConnection } from '../../database'
 import { Storage } from '../../storage'
 import { StorageKnexDB } from '../../storage/database'
-import { createStorageBackend } from '../../storage/backend'
+import { createAgent, createStorageBackend } from '../../storage/backend'
 import { getConfig } from '../../config'
 import { QueueJobScheduled, QueueJobSchedulingTime } from '../../monitoring/metrics'
 import { logger } from '../../monitoring'
@@ -20,7 +20,8 @@ export interface BasePayload {

 export type StaticThis<T> = { new (...args: any): T }

-const { enableQueueEvents } = getConfig()
+const { enableQueueEvents, storageBackendType, globalS3Protocol } = getConfig()
+const httpAgent = createAgent(globalS3Protocol)

 export abstract class BaseEvent<T extends BasePayload> {
   public static readonly version: string = 'v1'
@@ -113,7 +114,9 @@ export abstract class BaseEvent<T extends BasePayload> {
       host: payload.tenant.host,
     })

-    const storageBackend = createStorageBackend()
+    const storageBackend = createStorageBackend(storageBackendType, {
+      httpAgent,
+    })
     return new Storage(storageBackend, db)
   }

@@ -145,12 +148,10 @@ export abstract class BaseEvent<T extends BasePayload> {

     timer({
       name: constructor.getQueueName(),
-      tenant_id: this.payload.tenant.ref,
     })

     QueueJobScheduled.inc({
       name: constructor.getQueueName(),
-      tenant_id: this.payload.tenant.ref,
     })

     return res
diff --git a/src/queue/queue.ts b/src/queue/queue.ts
index 0d43a9be..513fc2e2 100644
--- a/src/queue/queue.ts
+++ b/src/queue/queue.ts
@@ -87,14 +87,12 @@ export abstract class Queue {
       const res = await event.handle(job)

       QueueJobCompleted.inc({
-        tenant_id: job.data.tenant.ref,
         name: event.getQueueName(),
       })

       return res
     } catch (e) {
       QueueJobRetryFailed.inc({
-        tenant_id: job.data.tenant.ref,
         name: event.getQueueName(),
       })

@@ -106,7 +104,6 @@ export abstract class Queue {
       }
       if (dbJob.retrycount === dbJob.retrylimit) {
         QueueJobError.inc({
-          tenant_id: job.data.tenant.ref,
           name: event.getQueueName(),
         })
       }
diff --git a/src/storage/backend/index.ts b/src/storage/backend/index.ts
index e438c08e..44b398f9 100644
--- a/src/storage/backend/index.ts
+++ b/src/storage/backend/index.ts
@@ -1,21 +1,34 @@
 import { StorageBackendAdapter } from './generic'
 import { FileBackend } from './file'
-import { S3Backend } from './s3'
-import { getConfig } from '../../config'
+import { S3Backend, S3ClientOptions } from './s3'
+import { getConfig, StorageBackendType } from '../../config'

 export * from './s3'
 export * from './file'
 export * from './generic'

-const { region, globalS3Endpoint, globalS3ForcePathStyle, storageBackendType } = getConfig()
+const { region, globalS3Endpoint, globalS3ForcePathStyle } = getConfig()

-export function createStorageBackend() {
+type ConfigForStorage<Type extends StorageBackendType> = Type extends 's3'
+  ? S3ClientOptions
+  : undefined
+
+export function createStorageBackend<Type extends StorageBackendType>(
+  type: Type,
+  config?: ConfigForStorage<Type>
+) {
   let storageBackend: StorageBackendAdapter

-  if (storageBackendType === 'file') {
+  if (type === 'file') {
     storageBackend = new FileBackend()
   } else {
-    storageBackend = new S3Backend(region, globalS3Endpoint, globalS3ForcePathStyle)
+    const defaultOptions: S3ClientOptions = {
+      region: region,
+      endpoint: globalS3Endpoint,
+      forcePathStyle: globalS3ForcePathStyle,
+      ...(config ? config : {}),
+    }
+    storageBackend = new S3Backend(defaultOptions)
   }

   return storageBackend
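The conditional type above ties the optional `config` argument to the backend `type`: for `'s3'` it is `S3ClientOptions`, while `ConfigForStorage<'file'>` resolves to `undefined`. A usage sketch under that typing (import path and endpoint value are illustrative, depending on the calling module):

```ts
import { createStorageBackend } from './storage/backend'

// 's3' accepts an optional S3ClientOptions override:
const s3Backend = createStorageBackend('s3', {
  endpoint: 'http://127.0.0.1:9000', // hypothetical local endpoint
  forcePathStyle: true,
})

// 'file' takes no meaningful config:
const fileBackend = createStorageBackend('file')

// A mismatch such as createStorageBackend('file', { endpoint: '...' })
// should now fail type-checking.
```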
diff --git a/src/storage/backend/s3.ts b/src/storage/backend/s3.ts
index d1df5d22..9f2ea01a 100644
--- a/src/storage/backend/s3.ts
+++ b/src/storage/backend/s3.ts
@@ -25,6 +25,31 @@ import Agent, { HttpsAgent } from 'agentkeepalive'

 const { globalS3Protocol, globalS3MaxSockets } = getConfig()

+/**
+ * Creates an agent for the given protocol
+ * @param protocol
+ */
+export function createAgent(protocol: 'http' | 'https') {
+  const agentOptions = {
+    maxSockets: globalS3MaxSockets,
+    keepAlive: true,
+  }
+
+  return protocol === 'http'
+    ? { httpAgent: new Agent(agentOptions) }
+    : { httpsAgent: new HttpsAgent(agentOptions) }
+}
+
+export interface S3ClientOptions {
+  endpoint?: string
+  region?: string
+  forcePathStyle?: boolean
+  accessKey?: string
+  secretKey?: string
+  role?: string
+  httpAgent?: { httpAgent: Agent } | { httpsAgent: HttpsAgent }
+}
+
 /**
  * S3Backend
  * Interacts with an s3-compatible file system with this S3Adapter
@@ -32,28 +57,20 @@ const { globalS3Protocol, globalS3MaxSockets } = getConfig()
 export class S3Backend implements StorageBackendAdapter {
   client: S3Client

-  constructor(region: string, endpoint?: string | undefined, globalS3ForcePathStyle?: boolean) {
-    const agentOptions = {
-      maxSockets: globalS3MaxSockets,
-      keepAlive: true,
-    }
-
-    const agent =
-      globalS3Protocol === 'http'
-        ? { httpAgent: new Agent(agentOptions) }
-        : { httpsAgent: new HttpsAgent(agentOptions) }
+  constructor(options: S3ClientOptions) {
+    const agent = options.httpAgent ? options.httpAgent : createAgent(globalS3Protocol)

     const params: S3ClientConfig = {
-      region,
+      region: options.region,
       runtime: 'node',
       requestHandler: new NodeHttpHandler({
         ...agent,
       }),
     }
-    if (endpoint) {
-      params.endpoint = endpoint
+    if (options.endpoint) {
+      params.endpoint = options.endpoint
     }
-    if (globalS3ForcePathStyle) {
+    if (options.forcePathStyle) {
       params.forcePathStyle = true
     }
     this.client = new S3Client(params)
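`createAgent` returns exactly the option-object shape (`{ httpAgent }` or `{ httpsAgent }`) that `S3ClientOptions.httpAgent` expects, which is what lets base-event.ts above build one keep-alive agent per process and share it across every backend it creates. A minimal sketch of that wiring, using only names from this diff (import path as seen from src/queue/events):

```ts
import { createAgent, createStorageBackend } from '../../storage/backend'

// Build a single keep-alive agent and reuse it for every S3 backend
// created by this process, mirroring the base-event.ts change above.
const httpAgent = createAgent('https')
const backend = createStorageBackend('s3', { httpAgent })
```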
diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts
index 1354c377..f813a45e 100644
--- a/src/storage/database/knex.ts
+++ b/src/storage/database/knex.ts
@@ -469,7 +469,6 @@ export class StorageKnexDB implements Database {
   ): Promise<Awaited<ReturnType<T>>> {
     const timer = DbQueryPerformance.startTimer({
       name: queryName,
-      tenant_id: this.options.tenantId,
     })

     let tnx = this.options.tnx
diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts
index fdc421c1..1ca50ca0 100644
--- a/src/storage/uploader.ts
+++ b/src/storage/uploader.ts
@@ -70,7 +70,6 @@ export class Uploader {
   async prepareUpload(options: UploadObjectOptions) {
     await this.canUpload(options)
     FileUploadStarted.inc({
-      tenant_id: this.db.tenantId,
       is_multipart: Boolean(options.isMultipart).toString(),
     })

@@ -208,7 +207,6 @@ export class Uploader {
     await Promise.all(events)

     FileUploadedSuccess.inc({
-      tenant_id: db.tenantId,
       is_multipart: Boolean(isMultipart).toString(),
     })

diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts
index a97c11f4..56d42023 100644
--- a/src/test/rls.test.ts
+++ b/src/test/rls.test.ts
@@ -66,8 +66,9 @@ const testSpec = yaml.load(
   fs.readFileSync(path.resolve(__dirname, 'rls_tests.yaml'), 'utf8')
 ) as RlsTestSpec

-const { serviceKey, tenantId, jwtSecret, databaseURL, globalS3Bucket } = getConfig()
-const backend = createStorageBackend()
+const { serviceKey, tenantId, jwtSecret, databaseURL, globalS3Bucket, storageBackendType } =
+  getConfig()
+const backend = createStorageBackend(storageBackendType)
 const client = backend.client

 jest.setTimeout(10000)
diff --git a/src/test/tus.test.ts b/src/test/tus.test.ts
index ce046d54..a01fee62 100644
--- a/src/test/tus.test.ts
+++ b/src/test/tus.test.ts
@@ -18,11 +18,11 @@ import { DetailedError } from 'tus-js-client'
 import { getServiceKeyUser } from '../database/tenant'
 import { checkBucketExists } from './common'

-const { serviceKey, tenantId, globalS3Bucket } = getConfig()
+const { serviceKey, tenantId, globalS3Bucket, storageBackendType } = getConfig()

 const oneChunkFile = fs.createReadStream(path.resolve(__dirname, 'assets', 'sadcat.jpg'))
 const localServerAddress = 'http://127.0.0.1:8999'

-const backend = createStorageBackend()
+const backend = createStorageBackend(storageBackendType)
 const client = backend.client

 describe('Tus multipart', () => {