diff --git a/src/blob-api.js b/src/blob-api.js index 00ac3df5..59632992 100644 --- a/src/blob-api.js +++ b/src/blob-api.js @@ -29,16 +29,16 @@ export class BlobApi { * @returns {Promise<string>} */ async getUrl(blobId) { - const { driveId, type, variant, name } = blobId + const { driveDiscoveryId, type, variant, name } = blobId const port = await getPort(this.blobServer.server) - return `http://127.0.0.1:${port}/${this.projectId}/${driveId}/${type}/${variant}/${name}` + return `http://127.0.0.1:${port}/${this.projectId}/${driveDiscoveryId}/${type}/${variant}/${name}` } /** * Write blobs for provided variants of a file * @param {{ original: string, preview?: string, thumbnail?: string }} filepaths * @param {{ mimeType: string }} metadata - * @returns {Promise<{ driveId: string, name: string, type: 'photo' | 'video' | 'audio', hash: string }>} + * @returns {Promise<{ driveDiscoveryId: string, name: string, type: 'photo' | 'video' | 'audio', hash: string }>} */ async create(filepaths, metadata) { const { original, preview, thumbnail } = filepaths @@ -86,7 +86,7 @@ export class BlobApi { } return { - driveId: this.blobStore.writerDriveId, + driveDiscoveryId: this.blobStore.writerDriveDiscoveryId, name, type: blobType, hash: contentHash.digest('hex'), @@ -95,7 +95,7 @@ export class BlobApi { /** * @param {string} filepath - * @param {Omit<import('./types.js').BlobId, 'driveId'>} options + * @param {Omit<import('./types.js').BlobId, 'driveDiscoveryId'>} options * @param {object} metadata * @param {string} metadata.mimeType * @param {import('node:crypto').Hash} [hash] @@ -107,7 +107,7 @@ export class BlobApi { fs.createReadStream(filepath), hash, - // @ts-ignore TODO: remove driveId property from createWriteStream + // @ts-ignore TODO: remove driveDiscoveryId property from createWriteStream this.blobStore.createWriteStream({ type, variant, name }, { metadata }) ) diff --git a/src/blob-server/fastify-plugin.js b/src/blob-server/fastify-plugin.js index dc05bcf8..b8e174d9 100644 --- a/src/blob-server/fastify-plugin.js +++ b/src/blob-server/fastify-plugin.js @@ -29,7 
+29,7 @@ const HEX_STRING_32_BYTES = T.String({ pattern: HEX_REGEX_32_BYTES }) const PARAMS_JSON_SCHEMA = T.Object({ projectId: HEX_STRING_32_BYTES, - driveId: HEX_STRING_32_BYTES, + driveDiscoveryId: HEX_STRING_32_BYTES, type: T.Union( BLOB_TYPES.map((type) => { return T.Literal(type) @@ -57,7 +57,7 @@ async function routes(fastify, options) { const { getBlobStore } = options fastify.get( - '/:projectId/:driveId/:type/:variant/:name', + '/:projectId/:driveDiscoveryId/:type/:variant/:name', { schema: { params: PARAMS_JSON_SCHEMA } }, async (request, reply) => { const { projectId, ...blobId } = request.params @@ -68,7 +68,7 @@ async function routes(fastify, options) { `Unsupported variant "${blobId.variant}" for ${blobId.type}` ) } - const { driveId } = blobId + const { driveDiscoveryId } = blobId let blobStore try { @@ -95,7 +95,10 @@ async function routes(fastify, options) { let blobStream try { - blobStream = await blobStore.createEntryReadStream(driveId, entry) + blobStream = await blobStore.createEntryReadStream( driveDiscoveryId, entry ) } catch (e) { reply.code(404) throw e @@ -110,9 +113,13 @@ async function routes(fastify, options) { reply.header('Content-Type', metadata.mimeType) } else { // Attempt to guess the MIME type based on the blob contents - const blobSlice = await blobStore.getEntryBlob(driveId, entry, { length: 20, }) + const blobSlice = await blobStore.getEntryBlob( driveDiscoveryId, entry, { length: 20, } ) if (!blobSlice) { reply.code(404) diff --git a/src/blob-store/index.js b/src/blob-store/index.js index a2ee4cac..93e0f76c 100644 --- a/src/blob-store/index.js +++ b/src/blob-store/index.js @@ -1,6 +1,7 @@ import Hyperdrive from 'hyperdrive' import b4a from 'b4a' import util from 'node:util' +import { discoveryKey } from 'hypercore-crypto' import { TypedEmitter } from 'tiny-typed-emitter' import { LiveDownload } from './live-download.js' @@ -28,7 +29,7 @@ class ErrNotFound extends Error { } export 
class BlobStore { - /** @type {Map<string, Hyperdrive>} Indexed by hex-encoded key */ + /** @type {Map<string, Hyperdrive>} Indexed by hex-encoded discovery key */ #hyperdrives = new Map() #writer /** @@ -48,17 +49,18 @@ export class BlobStore { for (const { key } of blobIndexCores) { // @ts-ignore - we know pretendCorestore is not actually a Corestore const drive = new Hyperdrive(corestore, key) - this.#hyperdrives.set(key.toString('hex'), drive) + this.#hyperdrives.set(getDiscoveryId(key), drive) if (key.equals(writerKey)) { this.#writer = proxyProps(drive, { key: writerKey }) } } coreManager.on('add-core', ({ key, namespace }) => { if (namespace !== 'blobIndex') return - if (this.#hyperdrives.has(key.toString('hex'))) return + const discoveryId = getDiscoveryId(key) + if (this.#hyperdrives.has(discoveryId)) return // @ts-ignore - we know pretendCorestore is not actually a Corestore const drive = new Hyperdrive(corestore, key) - this.#hyperdrives.set(key.toString('hex'), drive) + this.#hyperdrives.set(discoveryId, drive) this.#driveEmitter.emit('add-drive', drive) }) // This shouldn't happen, but this check ensures this.#writer is typed to exist @@ -66,16 +68,17 @@ export class BlobStore { throw new Error('Could not find a writer for the blobIndex namespace') } - get writerDriveId() { - return this.#writer.key.toString('hex') + get writerDriveDiscoveryId() { + return getDiscoveryId(this.#writer.key) } /** - * @param {string} driveId + * @param {string} driveDiscoveryId */ - #getDrive(driveId) { - const drive = this.#hyperdrives.get(driveId) - if (!drive) throw new Error('Drive not found ' + driveId.slice(0, 7)) + #getDrive(driveDiscoveryId) { + const drive = this.#hyperdrives.get(driveDiscoveryId) + if (!drive) + throw new Error('Drive not found ' + driveDiscoveryId.slice(0, 7)) return drive } @@ -85,8 +88,11 @@ export class BlobStore { * @param {false} [opts.wait=false] Set to `true` to wait for a blob to download, otherwise will throw if blob is not available locally * @param {never} [opts.timeout] 
Optional timeout to wait for a blob to download */ - async get({ type, variant, name, driveId }, { wait = false, timeout } = {}) { - const drive = this.#getDrive(driveId) + async get( + { type, variant, name, driveDiscoveryId }, + { wait = false, timeout } = {} + ) { + const drive = this.#getDrive(driveDiscoveryId) const path = makePath({ type, variant, name }) const blob = await drive.get(path, { wait, timeout }) if (!blob) throw new ErrNotFound() @@ -120,11 +126,11 @@ export class BlobStore { * @param {number} [options.timeout] Optional timeout to wait for a blob to download */ createReadStream( - { type, variant, name, driveId }, + { type, variant, name, driveDiscoveryId }, options = { wait: false } ) { // TODO: Error thrown from this be an emit error on the returned stream? - const drive = this.#getDrive(driveId) + const drive = this.#getDrive(driveDiscoveryId) const path = makePath({ type, variant, name }) // @ts-ignore - TODO: update @digidem/types to include wait/timeout options @@ -134,38 +140,44 @@ export class BlobStore { /** * Optimization for creating the blobs read stream when you have * previously read the entry from Hyperdrive using `drive.entry` - * @param {BlobId['driveId']} driveId Hyperdrive drive id + * @param {BlobId['driveDiscoveryId']} driveDiscoveryId Hyperdrive drive discovery id * @param {import('hyperdrive').HyperdriveEntry} entry Hyperdrive entry * @param {object} [options] * @param {boolean} [options.wait=false] Set to `true` to wait for a blob to download, otherwise will throw if blob is not available locally */ - async createEntryReadStream(driveId, entry, options = { wait: false }) { - const drive = this.#getDrive(driveId) + async createEntryReadStream( + driveDiscoveryId, + entry, + options = { wait: false } + ) { + const drive = this.#getDrive(driveDiscoveryId) const blobs = await drive.getBlobs() if (!blobs) throw new Error( - 'Hyperblobs instance not found for drive ' + driveId.slice(0, 7) + 'Hyperblobs instance not found for 
drive ' + + driveDiscoveryId.slice(0, 7) ) return blobs.createReadStream(entry.value.blob, options) } /** - * @param {BlobId['driveId']} driveId Hyperdrive drive id + * @param {BlobId['driveDiscoveryId']} driveDiscoveryId Hyperdrive drive discovery id * @param {import('hyperdrive').HyperdriveEntry} entry Hyperdrive entry * @param {object} [opts] * @param {number} [opts.length] * * @returns {Promise} */ - async getEntryBlob(driveId, entry, { length } = {}) { - const drive = this.#getDrive(driveId) + async getEntryBlob(driveDiscoveryId, entry, { length } = {}) { + const drive = this.#getDrive(driveDiscoveryId) const blobs = await drive.getBlobs() if (!blobs) throw new Error( - 'Hyperblobs instance not found for drive ' + driveId.slice(0, 7) + 'Hyperblobs instance not found for drive ' + + driveDiscoveryId.slice(0, 7) ) return blobs.get(entry.value.blob, { wait: false, start: 0, length }) @@ -173,27 +185,29 @@ export class BlobStore { /** * - * @param {Omit<BlobId, 'driveId'>} blobId + * @param {Omit<BlobId, 'driveDiscoveryId'>} blobId * @param {Buffer} blob * @param {object} [options] * @param {{mimeType: string}} [options.metadata] Metadata to store with the blob - * @returns {Promise<string>} public key as hex string of hyperdrive where blob is stored + * @returns {Promise<string>} discovery key as hex string of hyperdrive where blob is stored */ async put({ type, variant, name }, blob, options) { const path = makePath({ type, variant, name }) await this.#writer.put(path, blob, options) - return this.#writer.key.toString('hex') + return this.writerDriveDiscoveryId } /** - * @param {Omit<BlobId, 'driveId'>} blobId + * @param {Omit<BlobId, 'driveDiscoveryId'>} blobId * @param {object} [options] * @param {{mimeType: string}} [options.metadata] Metadata to store with the blob */ createWriteStream({ type, variant, name }, options) { const path = makePath({ type, variant, name }) const stream = this.#writer.createWriteStream(path, options) - return proxyProps(stream, { driveId: this.#writer.key.toString('hex') }) + return proxyProps(stream, { driveDiscoveryId: 
this.writerDriveDiscoveryId, + }) } /** @@ -205,11 +219,12 @@ export class BlobStore { * @returns {Promise} */ async entry( - { type, variant, name, driveId }, + { type, variant, name, driveDiscoveryId }, options = { follow: false, wait: false } ) { - const drive = this.#hyperdrives.get(driveId) - if (!drive) throw new Error('Drive not found ' + driveId.slice(0, 7)) + const drive = this.#hyperdrives.get(driveDiscoveryId) + if (!drive) + throw new Error('Drive not found ' + driveDiscoveryId.slice(0, 7)) const path = makePath({ type, variant, name }) const entry = await drive.entry(path, options) return entry @@ -221,9 +236,9 @@ export class BlobStore { * @param {boolean} [options.diff=false] Enable to return an object with a `block` property with number of bytes removed * @return {Promise<{ blocks: number } | null>} */ - async clear({ type, variant, name, driveId }, options = {}) { + async clear({ type, variant, name, driveDiscoveryId }, options = {}) { const path = makePath({ type, variant, name }) - const drive = this.#getDrive(driveId) + const drive = this.#getDrive(driveDiscoveryId) return drive.clear(path, options) } @@ -304,3 +319,11 @@ class PretendCorestore { /** no-op */ close() {} } + +/** + * @param {Buffer} key Public key of hypercore + * @returns {string} Hex-encoded value of derived discovery key + */ +function getDiscoveryId(key) { + return discoveryKey(key).toString('hex') +} diff --git a/src/types.ts b/src/types.ts index 977d8ba3..d10d4ea8 100644 --- a/src/types.ts +++ b/src/types.ts @@ -29,8 +29,8 @@ type BlobIdBase = { variant: BlobVariant /** unique identifier for blob (e.g. 
hash of content) */ name: string - /** public key as hex string of hyperdrive where blob is stored */ - driveId: string + /** discovery key as hex string of hyperdrive where blob is stored */ + driveDiscoveryId: string } // Ugly, but the only way I could figure out how to get what I wanted diff --git a/tests/blob-api.js b/tests/blob-api.js index 67ed241d..e172144c 100644 --- a/tests/blob-api.js +++ b/tests/blob-api.js @@ -9,7 +9,7 @@ import { createBlobStore } from './helpers/blob-store.js' import { timeoutException } from './helpers/index.js' test('get port after listening event with explicit port', async (t) => { - const blobStore = createBlobStore() + const { blobStore } = createBlobStore() const server = await createBlobServer({ blobStore }) t.ok(await timeoutException(getPort(server.server))) @@ -31,7 +31,7 @@ test('get port after listening event with explicit port', async (t) => { }) test('get port after listening event with unset port', async (t) => { - const blobStore = createBlobStore() + const { blobStore } = createBlobStore() const server = await createBlobServer({ blobStore }) t.ok(await timeoutException(getPort(server.server))) @@ -52,11 +52,11 @@ test('get port after listening event with unset port', async (t) => { test('get url from blobId', async (t) => { const projectId = '1234' - const type = 'image' + const type = 'photo' const variant = 'original' const name = '1234' - const blobStore = createBlobStore() + const { blobStore } = createBlobStore() const blobServer = await createBlobServer({ blobStore }) const blobApi = new BlobApi({ projectId: '1234', blobStore, blobServer }) @@ -66,12 +66,17 @@ test('get url from blobId', async (t) => { }) }) - const url = await blobApi.getUrl({ type, variant, name }) + const url = await blobApi.getUrl({ + driveDiscoveryId: blobStore.writerDriveDiscoveryId, + type, + variant, + name, + }) t.is( url, `http://127.0.0.1:${blobServer.server.address().port}/${projectId}/${ - blobStore.writerDriveId + 
blobStore.writerDriveDiscoveryId }/${type}/${variant}/${name}` ) t.teardown(async () => { @@ -109,7 +114,7 @@ test('create blobs', async (t) => { } ) - t.is(attachment.driveId, blobStore.writerDriveId) + t.is(attachment.driveDiscoveryId, blobStore.writerDriveDiscoveryId) t.is(attachment.type, 'photo') t.alike(attachment.hash, hash.digest('hex')) diff --git a/tests/blob-server.js b/tests/blob-server.js index 24ec7d26..8114399b 100644 --- a/tests/blob-server.js +++ b/tests/blob-server.js @@ -69,7 +69,7 @@ test('Invalid variant-type combination returns error', async (t) => { const url = buildRouteUrl({ projectId, - driveId: Buffer.alloc(32).toString('hex'), + driveDiscoveryId: Buffer.alloc(32).toString('hex'), name: 'foo', type: 'video', variant: 'thumbnail', @@ -169,7 +169,7 @@ test('GET photo uses mime type from metadata if found', async (t) => { const imageMimeType = getImageMimeType(image.ext) const metadata = imageMimeType ? { mimeType: imageMimeType } : undefined - const driveId = await blobStore.put(blobId, image.data, { + const driveDiscoveryId = await blobStore.put(blobId, image.data, { metadata: imageMimeType ? 
{ mimeType: imageMimeType } : undefined, }) @@ -178,7 +178,7 @@ test('GET photo uses mime type from metadata if found', async (t) => { url: buildRouteUrl({ ...blobId, projectId, - driveId, + driveDiscoveryId, }), }) @@ -201,9 +201,12 @@ test('GET photo returns 404 when trying to get non-replicated blob', async (t) = const { destroy } = replicateBlobs(cm1, cm2) - await waitForCores(cm2, [Buffer.from(blobId.driveId, 'hex')]) + await waitForCores(cm2, [cm1.getWriterCore('blobIndex').key]) + /** @type {any}*/ - const replicatedCore = cm2.getCoreByKey(Buffer.from(blobId.driveId, 'hex')) + const replicatedCore = cm2.getCoreByDiscoveryKey( + Buffer.from(blobId.driveDiscoveryId, 'hex') + ) await replicatedCore.update({ wait: true }) await replicatedCore.download({ end: replicatedCore.length }).done() await destroy() @@ -239,21 +242,24 @@ test('GET photo returns 404 when trying to get non-existent blob', async (t) => url: buildRouteUrl({ ...blobId, projectId, - driveId: blobStore.writerDriveId, + driveDiscoveryId: blobStore.writerDriveDiscoveryId, }), }) t.is(res.statusCode, 404) } - const driveId = await blobStore.put(blobId, expected) - await blobStore.clear({ ...blobId, driveId: blobStore.writerDriveId }) + const driveDiscoveryId = await blobStore.put(blobId, expected) + await blobStore.clear({ + ...blobId, + driveDiscoveryId: blobStore.writerDriveDiscoveryId, + }) // Test that the entry exists but blob does not { const res = await server.inject({ method: 'GET', - url: buildRouteUrl({ ...blobId, projectId, driveId }), + url: buildRouteUrl({ ...blobId, projectId, driveDiscoveryId }), }) t.is(res.statusCode, 404) @@ -298,10 +304,10 @@ async function populateStore(blobStore) { name: parsedFixture.name, }) - const driveId = await blobStore.put(blobIdBase, diskBuffer) + const driveDiscoveryId = await blobStore.put(blobIdBase, diskBuffer) data.push({ - blobId: { ...blobIdBase, driveId }, + blobId: { ...blobIdBase, driveDiscoveryId }, image: { data: diskBuffer, ext: 
parsedFixture.ext }, }) } @@ -324,7 +330,7 @@ function getImageMimeType(extension) { * @param {object} opts * @param {string} [opts.prefix] * @param {string} opts.projectId - * @param {string} opts.driveId + * @param {string} opts.driveDiscoveryId * @param {string} opts.type * @param {string} opts.variant * @param {string} opts.name @@ -334,10 +340,10 @@ function getImageMimeType(extension) { function buildRouteUrl({ prefix = '', projectId, - driveId, + driveDiscoveryId, type, variant, name, }) { - return `${prefix}/${projectId}/${driveId}/${type}/${variant}/${name}` + return `${prefix}/${projectId}/${driveDiscoveryId}/${type}/${variant}/${name}` } diff --git a/tests/blob-store/blob-store.js b/tests/blob-store/blob-store.js index 369758df..49a0c338 100644 --- a/tests/blob-store/blob-store.js +++ b/tests/blob-store/blob-store.js @@ -9,6 +9,7 @@ import { createCoreManager, waitForCores } from '../helpers/core-manager.js' import { BlobStore } from '../../src/blob-store/index.js' import { setTimeout } from 'node:timers/promises' import { replicateBlobs, concat } from '../helpers/blob-store.js' +import { discoveryKey } from 'hypercore-crypto' // Test with buffers that are 3 times the default blockSize for hyperblobs const TEST_BUF_SIZE = 3 * 64 * 1024 @@ -21,12 +22,15 @@ test('blobStore.put(blobId, buf) and blobStore.get(blobId)', async (t) => { variant: 'original', name: 'test-file', }) - const driveId = await blobStore.put(blobId, diskbuf) - const bndlbuf = await blobStore.get({ ...blobId, driveId }) + const driveDiscoveryId = await blobStore.put(blobId, diskbuf) + const bndlbuf = await blobStore.get({ + ...blobId, + driveDiscoveryId: driveDiscoveryId, + }) t.alike(bndlbuf, diskbuf, 'should be equal') }) -test('get(), driveId not found', async (t) => { +test('get(), driveDiscoveryId not found', async (t) => { const { blobStore } = await testenv() await t.exception( async () => @@ -34,21 +38,24 @@ test('get(), driveId not found', async (t) => { type: 'photo', variant: 
'original', name: 'test-file', - driveId: randomBytes(32).toString('hex'), + driveDiscoveryId: randomBytes(32).toString('hex'), }) ) }) -test('get(), valid driveId, missing file', async (t) => { +test('get(), valid driveDiscoveryId, missing file', async (t) => { const { blobStore, coreManager } = await testenv() - const driveId = coreManager.getWriterCore('blobIndex').key.toString('hex') + const driveDiscoveryId = discoveryKey( + coreManager.getWriterCore('blobIndex').key + ).toString('hex') + await t.exception( async () => await blobStore.get({ type: 'photo', variant: 'original', name: 'test-file', - driveId, + driveDiscoveryId: driveDiscoveryId, }) ) }) @@ -56,7 +63,7 @@ test('get(), valid driveId, missing file', async (t) => { test('get(), uninitialized drive', async (t) => { const { blobStore, coreManager } = await testenv() const driveKey = randomBytes(32) - const driveId = driveKey.toString('hex') + const driveDiscoveryId = discoveryKey(driveKey).toString('hex') coreManager.addCore(driveKey, 'blobIndex') await t.exception( async () => @@ -64,7 +71,7 @@ test('get(), uninitialized drive', async (t) => { type: 'photo', variant: 'original', name: 'test-file', - driveId, + driveDiscoveryId, }) ) }) @@ -80,12 +87,15 @@ test('get(), initialized but unreplicated drive', async (t) => { variant: 'original', name: 'blob1', }) - const driveId = await bs1.put(blob1Id, blob1) + const driveDiscoveryId = await bs1.put(blob1Id, blob1) const { destroy } = replicateBlobs(cm1, cm2) - await waitForCores(cm2, [Buffer.from(driveId, 'hex')]) + await waitForCores(cm2, [cm1.getWriterCore('blobIndex').key]) + /** @type {any} */ - const replicatedCore = cm2.getCoreByKey(Buffer.from(driveId, 'hex')) + const replicatedCore = cm2.getCoreByDiscoveryKey( + Buffer.from(driveDiscoveryId, 'hex') + ) await replicatedCore.update({ wait: true }) await destroy() t.is(replicatedCore.contiguousLength, 0, 'data is not downloaded') @@ -94,7 +104,7 @@ test('get(), initialized but unreplicated drive', 
async (t) => { async () => await bs2.get({ ...blob1Id, - driveId, + driveDiscoveryId, }) ) }) @@ -110,12 +120,14 @@ test('get(), replicated blobIndex, but blobs not replicated', async (t) => { variant: 'original', name: 'blob1', }) - const driveId = await bs1.put(blob1Id, blob1) + const driveDiscoveryId = await bs1.put(blob1Id, blob1) const { destroy } = replicateBlobs(cm1, cm2) - await waitForCores(cm2, [Buffer.from(driveId, 'hex')]) + await waitForCores(cm2, [cm1.getWriterCore('blobIndex').key]) /** @type {any} */ - const replicatedCore = cm2.getCoreByKey(Buffer.from(driveId, 'hex')) + const replicatedCore = cm2.getCoreByDiscoveryKey( + Buffer.from(driveDiscoveryId, 'hex') + ) await replicatedCore.update({ wait: true }) await replicatedCore.download({ end: replicatedCore.length }).done() await destroy() @@ -130,7 +142,7 @@ test('get(), replicated blobIndex, but blobs not replicated', async (t) => { async () => await bs2.get({ ...blob1Id, - driveId, + driveDiscoveryId, }) ) }) @@ -144,10 +156,13 @@ test('blobStore.createWriteStream(blobId) and blobStore.createReadStream(blobId) name: 'test-file', }) const ws = blobStore.createWriteStream(blobId) - const { driveId } = ws + const { driveDiscoveryId } = ws await pipeline(fs.createReadStream(new URL(import.meta.url)), ws) const bndlbuf = await concat( - blobStore.createReadStream({ ...blobId, driveId }) + blobStore.createReadStream({ + ...blobId, + driveDiscoveryId, + }) ) t.alike(bndlbuf, diskbuf, 'should be equal') }) @@ -165,7 +180,7 @@ test('blobStore.createReadStream should not wait', async (t) => { try { const result = blobStore.createReadStream({ ...blobId, - driveId: blobStore.writerDriveId, + driveDiscoveryId: blobStore.writerDriveDiscoveryId, }) await concat(result) } catch (error) { @@ -180,7 +195,7 @@ test('blobStore.createReadStream should not wait', async (t) => { { const stream = blobStore.createReadStream({ ...blobId, - driveId: blobStore.writerDriveId, + driveDiscoveryId: 
blobStore.writerDriveDiscoveryId, }) const blob = await concat(stream) t.alike(blob, expected, 'should be equal') @@ -189,7 +204,7 @@ test('blobStore.createReadStream should not wait', async (t) => { try { const stream = blobStore2.createReadStream({ ...blobId, - driveId: blobStore2.writerDriveId, + driveDiscoveryId: blobStore2.writerDriveDiscoveryId, }) await concat(stream) } catch (error) { @@ -202,17 +217,20 @@ test('blobStore.createReadStream should not wait', async (t) => { { const stream = blobStore2.createReadStream({ ...blobId, - driveId: blobStore2.writerDriveId, + driveDiscoveryId: blobStore2.writerDriveDiscoveryId, }) const blob = await concat(stream) t.alike(blob, expected, 'should be equal') - await blobStore2.clear({ ...blobId, driveId: blobStore2.writerDriveId }) + await blobStore2.clear({ + ...blobId, + driveDiscoveryId: blobStore2.writerDriveDiscoveryId, + }) try { const stream = blobStore2.createReadStream({ ...blobId, - driveId: blobStore2.writerDriveId, + driveDiscoveryId: blobStore2.writerDriveDiscoveryId, }) await concat(stream) } catch (error) { @@ -221,7 +239,7 @@ test('blobStore.createReadStream should not wait', async (t) => { } }) -test('blobStore.writerDriveId', async (t) => { +test('blobStore.writerDriveDiscoveryId', async (t) => { { const { blobStore } = await testenv() const blobId = /** @type {const} */ ({ @@ -231,9 +249,9 @@ test('blobStore.writerDriveId', async (t) => { }) const ws = blobStore.createWriteStream(blobId) t.is( - ws.driveId, - blobStore.writerDriveId, - 'writerDriveId is same as driveId used for createWriteStream' + ws.driveDiscoveryId, + blobStore.writerDriveDiscoveryId, + 'writerDriveDiscoveryId is same as driveDiscoveryId used for createWriteStream' ) } { @@ -243,11 +261,11 @@ test('blobStore.writerDriveId', async (t) => { variant: 'original', name: 'test-file', }) - const driveId = await blobStore.put(blobId, Buffer.from('hello')) + const driveDiscoveryId = await blobStore.put(blobId, Buffer.from('hello')) t.is( - 
driveId, - blobStore.writerDriveId, - 'writerDriveId is same as driveId returned by put()' + driveDiscoveryId, + blobStore.writerDriveDiscoveryId, + 'writerDriveDiscoveryId is same as driveDiscoveryId returned by put()' ) } }) @@ -275,7 +293,7 @@ test('live download', async function (t) { }) // STEP 1: Write a blob to CM1 - const driveId1 = await bs1.put(blob1Id, blob1) + const driveDiscoveryId1 = await bs1.put(blob1Id, blob1) // STEP 2: Replicate CM1 with CM3 const { destroy: destroy1 } = replicateBlobs(cm1, cm3) // STEP 3: Start live download to CM3 @@ -285,7 +303,7 @@ test('live download', async function (t) { // STEP 5: Replicate CM2 with CM3 const { destroy: destroy2 } = replicateBlobs(cm2, cm3) // STEP 6: Write a blob to CM2 - const driveId2 = await bs2.put(blob2Id, blob2) + const driveDiscoveryId2 = await bs2.put(blob2Id, blob2) // STEP 7: Wait for blobs to be downloaded await downloaded(liveDownload) // STEP 8: destroy all the replication streams @@ -293,12 +311,12 @@ test('live download', async function (t) { // Both blob1 and blob2 (from CM1 and CM2) should have been downloaded to CM3 t.alike( - await bs3.get({ ...blob1Id, driveId: driveId1 }), + await bs3.get({ ...blob1Id, driveDiscoveryId: driveDiscoveryId1 }), blob1, 'blob1 was downloaded' ) t.alike( - await bs3.get({ ...blob2Id, driveId: driveId2 }), + await bs3.get({ ...blob2Id, driveDiscoveryId: driveDiscoveryId2 }), blob2, 'blob2 was downloaded' ) @@ -328,7 +346,7 @@ test('sparse live download', async function (t) { name: 'blob3', }) - const driveId = await bs1.put(blob1Id, blob1) + const driveDiscoveryId = await bs1.put(blob1Id, blob1) await bs1.put(blob2Id, blob2) await bs1.put(blob3Id, blob3) @@ -339,10 +357,18 @@ test('sparse live download', async function (t) { await destroy() - t.alike(await bs2.get({ ...blob1Id, driveId }), blob1, 'blob1 was downloaded') - t.alike(await bs2.get({ ...blob2Id, driveId }), blob2, 'blob2 was downloaded') + t.alike( + await bs2.get({ ...blob1Id, driveDiscoveryId: 
driveDiscoveryId }), + blob1, + 'blob1 was downloaded' + ) + t.alike( + await bs2.get({ ...blob2Id, driveDiscoveryId: driveDiscoveryId }), + blob2, + 'blob2 was downloaded' + ) await t.exception( - () => bs2.get({ ...blob3Id, driveId }), + () => bs2.get({ ...blob3Id, driveDiscoveryId: driveDiscoveryId }), 'blob3 was not downloaded' ) }) @@ -367,7 +393,7 @@ test('cancelled live download', async function (t) { }) // STEP 1: Write a blob to CM1 - const driveId1 = await bs1.put(blob1Id, blob1) + const driveDiscoveryId1 = await bs1.put(blob1Id, blob1) // STEP 2: Replicate CM1 with CM3 const { destroy: destroy1 } = replicateBlobs(cm1, cm3) // STEP 3: Start live download to CM3 @@ -380,7 +406,7 @@ test('cancelled live download', async function (t) { // STEP 6: Replicate CM2 with CM3 const { destroy: destroy2 } = replicateBlobs(cm2, cm3) // STEP 7: Write a blob to CM2 - const driveId2 = await bs2.put(blob2Id, blob2) + const driveDiscoveryId2 = await bs2.put(blob2Id, blob2) // STEP 8: Wait for blobs to (not) download await setTimeout(200) // STEP 9: destroy all the replication streams @@ -388,17 +414,17 @@ test('cancelled live download', async function (t) { // Both blob1 and blob2 (from CM1 and CM2) should have been downloaded to CM3 t.alike( - await bs3.get({ ...blob1Id, driveId: driveId1 }), + await bs3.get({ ...blob1Id, driveDiscoveryId: driveDiscoveryId1 }), blob1, 'blob1 was downloaded' ) await t.exception( - async () => bs3.get({ ...blob2Id, driveId: driveId2 }), + async () => bs3.get({ ...blob2Id, driveDiscoveryId: driveDiscoveryId2 }), 'blob2 was not downloaded' ) }) -test('blobStore.getEntryBlob(driveId, entry)', async (t) => { +test('blobStore.getEntryBlob(driveDiscoveryId, entry)', async (t) => { const { blobStore } = await testenv() const diskbuf = await readFile(new URL(import.meta.url)) const blobId = /** @type {const} */ ({ @@ -406,15 +432,15 @@ test('blobStore.getEntryBlob(driveId, entry)', async (t) => { variant: 'original', name: 'test-file', }) - const 
driveId = await blobStore.put(blobId, diskbuf) - const entry = await blobStore.entry({ ...blobId, driveId }) + const driveDiscoveryId = await blobStore.put(blobId, diskbuf) + const entry = await blobStore.entry({ ...blobId, driveDiscoveryId }) - const buf = await blobStore.getEntryBlob(driveId, entry) + const buf = await blobStore.getEntryBlob(driveDiscoveryId, entry) t.alike(buf, diskbuf, 'should be equal') }) -test('blobStore.getEntryReadStream(driveId, entry)', async (t) => { +test('blobStore.getEntryReadStream(driveDiscoveryId, entry)', async (t) => { const { blobStore } = await testenv() const diskbuf = await readFile(new URL(import.meta.url)) const blobId = /** @type {const} */ ({ @@ -422,17 +448,17 @@ test('blobStore.getEntryReadStream(driveId, entry)', async (t) => { variant: 'original', name: 'test-file', }) - const driveId = await blobStore.put(blobId, diskbuf) - const entry = await blobStore.entry({ ...blobId, driveId }) + const driveDiscoveryId = await blobStore.put(blobId, diskbuf) + const entry = await blobStore.entry({ ...blobId, driveDiscoveryId }) const buf = await concat( - await blobStore.createEntryReadStream(driveId, entry) + await blobStore.createEntryReadStream(driveDiscoveryId, entry) ) t.alike(buf, diskbuf, 'should be equal') }) -test('blobStore.getEntryReadStream(driveId, entry) should not wait', async (t) => { +test('blobStore.getEntryReadStream(driveDiscoveryId, entry) should not wait', async (t) => { const { blobStore } = await testenv() const expected = await readFile(new URL(import.meta.url)) @@ -443,12 +469,18 @@ test('blobStore.getEntryReadStream(driveId, entry) should not wait', async (t) = name: 'test-file', }) - const driveId = await blobStore.put(blobId, expected) - const entry = await blobStore.entry({ ...blobId, driveId }) - await blobStore.clear({ ...blobId, driveId: blobStore.writerDriveId }) + const driveDiscoveryId = await blobStore.put(blobId, expected) + const entry = await blobStore.entry({ ...blobId, driveDiscoveryId 
}) + await blobStore.clear({ + ...blobId, + driveDiscoveryId: blobStore.writerDriveDiscoveryId, + }) try { - const stream = await blobStore.createEntryReadStream(driveId, entry) + const stream = await blobStore.createEntryReadStream( + driveDiscoveryId, + entry + ) await concat(stream) } catch (error) { t.is(error.message, 'Block not available', 'Block not available')