diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6fec13aab8e..b912ff0ff78 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -4708,7 +4708,7 @@ buildvariants: display_name: rhel8 Node Latest run_on: rhel80-large expansions: - NODE_LTS_VERSION: 20 + NODE_LTS_VERSION: latest CLIENT_ENCRYPTION: true tasks: - test-latest-server @@ -4749,9 +4749,10 @@ buildvariants: - test-latest-load-balanced - test-auth-kerberos - test-auth-ldap - - test-socks5 - test-socks5-csfle - test-socks5-tls + - test-zstd-compression + - test-snappy-compression - test-tls-support-latest - test-tls-support-8.0 - test-tls-support-7.0 diff --git a/.evergreen/generate_evergreen_tasks.js b/.evergreen/generate_evergreen_tasks.js index 52d5d2124e3..13b5deec497 100644 --- a/.evergreen/generate_evergreen_tasks.js +++ b/.evergreen/generate_evergreen_tasks.js @@ -395,7 +395,7 @@ for (const { name: `${osName}-node-latest`, display_name: `${osDisplayName} Node Latest`, run_on, - expansions: { NODE_LTS_VERSION: LATEST_LTS }, + expansions: { NODE_LTS_VERSION: 'latest' }, tasks: tasks.map(({ name }) => name) }; if (clientEncryption) { diff --git a/src/cmap/commands.ts b/src/cmap/commands.ts index 9322fc53414..f14c3f5de4c 100644 --- a/src/cmap/commands.ts +++ b/src/cmap/commands.ts @@ -429,10 +429,60 @@ export interface OpMsgOptions { /** @internal */ export class DocumentSequence { + field: string; documents: Document[]; + serializedDocumentsLength: number; + private chunks: Uint8Array[]; + private header: Buffer; - constructor(documents: Document[]) { - this.documents = documents; + /** + * Create a new document sequence for the provided field. + * @param field - The field it will replace. + */ + constructor(field: string, documents?: Document[]) { + this.field = field; + this.documents = []; + this.chunks = []; + this.serializedDocumentsLength = 0; + // Document sequences starts with type 1 at the first byte. + // Field strings must always be UTF-8. + const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1); + buffer[0] = 1; + // Third part is the field name at offset 5 with trailing null byte. + encodeUTF8Into(buffer, `${this.field}\0`, 5); + this.chunks.push(buffer); + this.header = buffer; + if (documents) { + for (const doc of documents) { + this.push(doc, BSON.serialize(doc)); + } + } + } + + /** + * Push a document to the document sequence. Will serialize the document + * as well and return the current serialized length of all documents. + * @param document - The document to add. + * @param buffer - The serialized document in raw BSON. + * @returns The new total document sequence length. + */ + push(document: Document, buffer: Uint8Array): number { + this.serializedDocumentsLength += buffer.length; + // Push the document. + this.documents.push(document); + // Push the document raw bson. + this.chunks.push(buffer); + // Write the new length. + this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1); + return this.serializedDocumentsLength + this.header.length; + } + + /** + * Get the fully serialized bytes for the document sequence section. + * @returns The section bytes. + */ + toBin(): Uint8Array { + return Buffer.concat(this.chunks); } } @@ -543,21 +593,7 @@ export class OpMsgRequest { const chunks = []; for (const [key, value] of Object.entries(document)) { if (value instanceof DocumentSequence) { - // Document sequences starts with type 1 at the first byte. 
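Both the new DocumentSequence above and the inline code being removed below build the same OP_MSG payload-type-1 section. A sketch of the byte layout for an `ops` field, with illustrative document sizes (not taken from the patch):

offset 0       0x01                      // payload type 1 (document sequence)
offsets 1-4    int32 section length      // 4 + 'ops'.length + 1 + total BSON bytes
offsets 5-8    'o' 'p' 's' 0x00          // field name as a null-terminated UTF-8 cstring
offset 9...    <BSON doc><BSON doc>...   // serialized documents, back to back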
- const buffer = Buffer.allocUnsafe(1 + 4 + key.length + 1); - buffer[0] = 1; - // Third part is the field name at offset 5 with trailing null byte. - encodeUTF8Into(buffer, `${key}\0`, 5); - chunks.push(buffer); - // Fourth part are the documents' bytes. - let docsLength = 0; - for (const doc of value.documents) { - const docBson = this.serializeBson(doc); - docsLength += docBson.length; - chunks.push(docBson); - } - // Second part of the sequence is the length at offset 1; - buffer.writeInt32LE(4 + key.length + 1 + docsLength, 1); + chunks.push(value.toBin()); // Why are we removing the field from the command? This is because it needs to be // removed in the OP_MSG request first section, and DocumentSequence is not a // BSON type and is specific to the MongoDB wire protocol so there's nothing diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 986cce46b6e..0837c54d3fa 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -237,6 +237,8 @@ export class Connection extends TypedEventEmitter { .on('error', this.onError.bind(this)); this.socket.on('close', this.onClose.bind(this)); this.socket.on('timeout', this.onTimeout.bind(this)); + + this.messageStream.pause(); } public get hello() { @@ -651,6 +653,7 @@ export class Connection extends TypedEventEmitter { private async *readMany(): AsyncGenerator { try { this.dataEvents = onData(this.messageStream); + this.messageStream.resume(); for await (const message of this.dataEvents) { const response = await decompressResponse(message); yield response; @@ -661,6 +664,7 @@ export class Connection extends TypedEventEmitter { } } finally { this.dataEvents = null; + this.messageStream.pause(); this.throwIfAborted(); } } diff --git a/src/cursor/client_bulk_write_cursor.ts b/src/cursor/client_bulk_write_cursor.ts index a1ae31fba30..06f34dfc52f 100644 --- a/src/cursor/client_bulk_write_cursor.ts +++ b/src/cursor/client_bulk_write_cursor.ts @@ -1,8 +1,10 @@ -import type { Document } from '../bson'; +import { type Document } from 'bson'; + import { type ClientBulkWriteCursorResponse } from '../cmap/wire_protocol/responses'; -import { MongoBulkWriteCursorError } from '../error'; +import { MongoClientBulkWriteCursorError } from '../error'; import type { MongoClient } from '../mongo_client'; import { ClientBulkWriteOperation } from '../operations/client_bulk_write/client_bulk_write'; +import { type ClientBulkWriteCommandBuilder } from '../operations/client_bulk_write/command_builder'; import { type ClientBulkWriteOptions } from '../operations/client_bulk_write/common'; import { executeOperation } from '../operations/execute_operation'; import type { ClientSession } from '../sessions'; @@ -24,17 +26,21 @@ export interface ClientBulkWriteCursorOptions * @internal */ export class ClientBulkWriteCursor extends AbstractCursor { - public readonly command: Document; + commandBuilder: ClientBulkWriteCommandBuilder; /** @internal */ private cursorResponse?: ClientBulkWriteCursorResponse; /** @internal */ private clientBulkWriteOptions: ClientBulkWriteOptions; /** @internal */ - constructor(client: MongoClient, command: Document, options: ClientBulkWriteOptions = {}) { + constructor( + client: MongoClient, + commandBuilder: ClientBulkWriteCommandBuilder, + options: ClientBulkWriteOptions = {} + ) { super(client, new MongoDBNamespace('admin', '$cmd'), options); - this.command = command; + this.commandBuilder = commandBuilder; this.clientBulkWriteOptions = options; } @@ -44,22 +50,29 @@ export class ClientBulkWriteCursor extends AbstractCursor 
{ */ get response(): ClientBulkWriteCursorResponse { if (this.cursorResponse) return this.cursorResponse; - throw new MongoBulkWriteCursorError( + throw new MongoClientBulkWriteCursorError( 'No client bulk write cursor response returned from the server.' ); } + /** + * Get the last set of operations the cursor executed. + */ + get operations(): Document[] { + return this.commandBuilder.lastOperations; + } + clone(): ClientBulkWriteCursor { const clonedOptions = mergeOptions({}, this.clientBulkWriteOptions); delete clonedOptions.session; - return new ClientBulkWriteCursor(this.client, this.command, { + return new ClientBulkWriteCursor(this.client, this.commandBuilder, { ...clonedOptions }); } /** @internal */ async _initialize(session: ClientSession): Promise { - const clientBulkWriteOperation = new ClientBulkWriteOperation(this.command, { + const clientBulkWriteOperation = new ClientBulkWriteOperation(this.commandBuilder, { ...this.clientBulkWriteOptions, ...this.cursorOptions, session diff --git a/src/error.ts b/src/error.ts index c9652877cb2..4aed6b93146 100644 --- a/src/error.ts +++ b/src/error.ts @@ -622,7 +622,7 @@ export class MongoGCPError extends MongoOIDCError { * @public * @category Error */ -export class MongoBulkWriteCursorError extends MongoRuntimeError { +export class MongoClientBulkWriteCursorError extends MongoRuntimeError { /** * **Do not use this constructor!** * @@ -639,7 +639,34 @@ export class MongoBulkWriteCursorError extends MongoRuntimeError { } override get name(): string { - return 'MongoBulkWriteCursorError'; + return 'MongoClientBulkWriteCursorError'; + } +} + +/** + * An error indicating that an error occurred on the client when executing a client bulk write. + * + * @public + * @category Error + */ +export class MongoClientBulkWriteExecutionError extends MongoRuntimeError { + /** + * **Do not use this constructor!** + * + * Meant for internal use only. + * + * @remarks + * This class is only meant to be constructed within the driver. This constructor is + * not subject to semantic versioning compatibility guarantees and may change at any time. 
+ * + * @public + **/ + constructor(message: string) { + super(message); + } + + override get name(): string { + return 'MongoClientBulkWriteExecutionError'; } } diff --git a/src/index.ts b/src/index.ts index f68dd7699e0..97f964ce546 100644 --- a/src/index.ts +++ b/src/index.ts @@ -44,8 +44,9 @@ export { MongoAWSError, MongoAzureError, MongoBatchReExecutionError, - MongoBulkWriteCursorError, MongoChangeStreamError, + MongoClientBulkWriteCursorError, + MongoClientBulkWriteExecutionError, MongoCompatibilityError, MongoCursorExhaustedError, MongoCursorInUseError, diff --git a/src/operations/client_bulk_write/client_bulk_write.ts b/src/operations/client_bulk_write/client_bulk_write.ts index cb020bde40c..b04c978114d 100644 --- a/src/operations/client_bulk_write/client_bulk_write.ts +++ b/src/operations/client_bulk_write/client_bulk_write.ts @@ -1,11 +1,11 @@ -import { type Document } from 'bson'; - +import { MongoClientBulkWriteExecutionError, ServerType } from '../../beta'; import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; import { MongoDBNamespace } from '../../utils'; import { CommandOperation } from '../command'; import { Aspect, defineAspects } from '../operation'; +import { type ClientBulkWriteCommandBuilder } from './command_builder'; import { type ClientBulkWriteOptions } from './common'; /** @@ -13,16 +13,16 @@ import { type ClientBulkWriteOptions } from './common'; * @internal */ export class ClientBulkWriteOperation extends CommandOperation { - command: Document; + commandBuilder: ClientBulkWriteCommandBuilder; override options: ClientBulkWriteOptions; override get commandName() { return 'bulkWrite' as const; } - constructor(command: Document, options: ClientBulkWriteOptions) { + constructor(commandBuilder: ClientBulkWriteCommandBuilder, options: ClientBulkWriteOptions) { super(undefined, options); - this.command = command; + this.commandBuilder = commandBuilder; this.options = options; this.ns = new MongoDBNamespace('admin', '$cmd'); } @@ -37,9 +37,45 @@ export class ClientBulkWriteOperation extends CommandOperation { - return await super.executeCommand(server, session, this.command, ClientBulkWriteCursorResponse); + let command; + + if (server.description.type === ServerType.LoadBalancer) { + if (session) { + // Checkout a connection to build the command. + const connection = await server.pool.checkOut(); + // Pin the connection to the session so it get used to execute the command and we do not + // perform a double check-in/check-out. + session.pin(connection); + command = this.commandBuilder.buildBatch( + connection.hello?.maxMessageSizeBytes, + connection.hello?.maxWriteBatchSize + ); + } else { + throw new MongoClientBulkWriteExecutionError( + 'Session provided to the client bulk write operation must be present.' + ); + } + } else { + // At this point we have a server and the auto connect code has already + // run in executeOperation, so the server description will be populated. + // We can use that to build the command. + if (!server.description.maxWriteBatchSize || !server.description.maxMessageSizeBytes) { + throw new MongoClientBulkWriteExecutionError( + 'In order to execute a client bulk write, both maxWriteBatchSize and maxMessageSizeBytes must be provided by the servers hello response.' 
+ ); + } + command = this.commandBuilder.buildBatch( + server.description.maxMessageSizeBytes, + server.description.maxWriteBatchSize + ); + } + return await super.executeCommand(server, session, command, ClientBulkWriteCursorResponse); } } // Skipping the collation as it goes on the individual ops. -defineAspects(ClientBulkWriteOperation, [Aspect.WRITE_OPERATION, Aspect.SKIP_COLLATION]); +defineAspects(ClientBulkWriteOperation, [ + Aspect.WRITE_OPERATION, + Aspect.SKIP_COLLATION, + Aspect.CURSOR_CREATING +]); diff --git a/src/operations/client_bulk_write/command_builder.ts b/src/operations/client_bulk_write/command_builder.ts index ad7ab953605..bf1b72b2b23 100644 --- a/src/operations/client_bulk_write/command_builder.ts +++ b/src/operations/client_bulk_write/command_builder.ts @@ -1,4 +1,4 @@ -import { type Document } from '../../bson'; +import { BSON, type Document } from '../../bson'; import { DocumentSequence } from '../../cmap/commands'; import { type PkFactory } from '../../mongo_client'; import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types'; @@ -28,11 +28,18 @@ export interface ClientBulkWriteCommand { comment?: any; } +/** + * The bytes overhead for the extra fields added post command generation. + */ +const MESSAGE_OVERHEAD_BYTES = 1000; + /** @internal */ export class ClientBulkWriteCommandBuilder { models: AnyClientBulkWriteModel[]; options: ClientBulkWriteOptions; pkFactory: PkFactory; + currentModelIndex: number; + lastOperations: Document[]; /** * Create the command builder. @@ -46,6 +53,8 @@ export class ClientBulkWriteCommandBuilder { this.models = models; this.options = options; this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY; + this.currentModelIndex = 0; + this.lastOperations = []; } /** @@ -60,34 +69,96 @@ export class ClientBulkWriteCommandBuilder { } /** - * Build the bulk write commands from the models. + * Determines if there is another batch to process. + * @returns True if not all batches have been built. + */ + hasNextBatch(): boolean { + return this.currentModelIndex < this.models.length; + } + + /** + * Build a single batch of a client bulk write command. + * @param maxMessageSizeBytes - The max message size in bytes. + * @param maxWriteBatchSize - The max write batch size. + * @returns The client bulk write command. */ - buildCommands(): ClientBulkWriteCommand[] { - // Iterate the models to build the ops and nsInfo fields. - const operations = []; + buildBatch(maxMessageSizeBytes: number, maxWriteBatchSize: number): ClientBulkWriteCommand { + let commandLength = 0; let currentNamespaceIndex = 0; + const command: ClientBulkWriteCommand = this.baseCommand(); const namespaces = new Map(); - for (const model of this.models) { + + while (this.currentModelIndex < this.models.length) { + const model = this.models[this.currentModelIndex]; const ns = model.namespace; - const index = namespaces.get(ns); - if (index != null) { - operations.push(buildOperation(model, index, this.pkFactory)); + const nsIndex = namespaces.get(ns); + + if (nsIndex != null) { + // Build the operation and serialize it to get the bytes buffer. + const operation = buildOperation(model, nsIndex, this.pkFactory); + const operationBuffer = BSON.serialize(operation); + + // Check if the operation buffer can fit in the command. If it can, + // then add the operation to the document sequence and increment the + // current length as long as the ops don't exceed the maxWriteBatchSize. 
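// Worked example with assumed sizes (illustrative, not part of the patch): suppose
// earlier pushes left commandLength at 18_009 = 1000 (MESSAGE_OVERHEAD_BYTES)
// + 17_009 (ops sequence header plus document bytes), and the next operation
// serializes to 1_024 bytes. The check below is then 18_009 + 1_024 < maxMessageSizeBytes,
// and after the push commandLength becomes 1000 + 18_033, since push() returns the
// sequence's new total byte length.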
+ if ( + commandLength + operationBuffer.length < maxMessageSizeBytes && + command.ops.documents.length < maxWriteBatchSize + ) { + // Pushing to the ops document sequence returns the total byte length of the document sequence. + commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer); + // Increment the builder's current model index. + this.currentModelIndex++; + } else { + // The operation cannot fit in the current command and will need to + // go in the next batch. Exit the loop. + break; + } } else { + // The namespace is not already in the nsInfo so we will set it in the map, and + // construct our nsInfo and ops documents and buffers. namespaces.set(ns, currentNamespaceIndex); - operations.push(buildOperation(model, currentNamespaceIndex, this.pkFactory)); - currentNamespaceIndex++; + const nsInfo = { ns: ns }; + const nsInfoBuffer = BSON.serialize(nsInfo); + const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory); + const operationBuffer = BSON.serialize(operation); + + // Check if the operation and nsInfo buffers can fit in the command. If they + // can, then add the operation and nsInfo to their respective document + // sequences and increment the current length as long as the ops don't exceed + // the maxWriteBatchSize. + if ( + commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes && + command.ops.documents.length < maxWriteBatchSize + ) { + // Pushing to the ops document sequence returns the total byte length of the document sequence. + commandLength = + MESSAGE_OVERHEAD_BYTES + + command.nsInfo.push(nsInfo, nsInfoBuffer) + + command.ops.push(operation, operationBuffer); + // We've added a new namespace, increment the namespace index. + currentNamespaceIndex++; + // Increment the builder's current model index. + this.currentModelIndex++; + } else { + // The operation cannot fit in the current command and will need to + // go in the next batch. Exit the loop. + break; + } } } + // Set the last operations and return the command. + this.lastOperations = command.ops.documents; + return command; + } - const nsInfo = Array.from(namespaces.keys(), ns => ({ ns })); - - // The base command. + private baseCommand(): ClientBulkWriteCommand { const command: ClientBulkWriteCommand = { bulkWrite: 1, errorsOnly: this.errorsOnly, ordered: this.options.ordered ?? true, - ops: new DocumentSequence(operations), - nsInfo: new DocumentSequence(nsInfo) + ops: new DocumentSequence('ops'), + nsInfo: new DocumentSequence('nsInfo') }; // Add bypassDocumentValidation if it was present in the options. 
if (this.options.bypassDocumentValidation != null) { @@ -103,7 +174,8 @@ export class ClientBulkWriteCommandBuilder { if (this.options.comment !== undefined) { command.comment = this.options.comment; } - return [command]; + + return command; } } diff --git a/src/operations/client_bulk_write/executor.ts b/src/operations/client_bulk_write/executor.ts index 74511ede9dd..5baf1ed6b6e 100644 --- a/src/operations/client_bulk_write/executor.ts +++ b/src/operations/client_bulk_write/executor.ts @@ -1,11 +1,9 @@ -import { type Document } from 'bson'; - import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor'; import { type MongoClient } from '../../mongo_client'; import { WriteConcern } from '../../write_concern'; import { executeOperation } from '../execute_operation'; import { ClientBulkWriteOperation } from './client_bulk_write'; -import { type ClientBulkWriteCommand, ClientBulkWriteCommandBuilder } from './command_builder'; +import { ClientBulkWriteCommandBuilder } from './command_builder'; import { type AnyClientBulkWriteModel, type ClientBulkWriteOptions, @@ -57,43 +55,26 @@ export class ClientBulkWriteExecutor { this.options, pkFactory ); - const commands = commandBuilder.buildCommands(); + // Unacknowledged writes need to execute all batches and return { ok: 1} if (this.options.writeConcern?.w === 0) { - return await executeUnacknowledged(this.client, this.options, commands); + while (commandBuilder.hasNextBatch()) { + const operation = new ClientBulkWriteOperation(commandBuilder, this.options); + await executeOperation(this.client, operation); + } + return { ok: 1 }; + } else { + const resultsMerger = new ClientBulkWriteResultsMerger(this.options); + // For each command will will create and exhaust a cursor for the results. + let currentBatchOffset = 0; + while (commandBuilder.hasNextBatch()) { + const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options); + const docs = await cursor.toArray(); + const operations = cursor.operations; + resultsMerger.merge(currentBatchOffset, operations, cursor.response, docs); + // Set the new batch index so we can back back to the index in the original models. + currentBatchOffset += operations.length; + } + return resultsMerger.result; } - return await executeAcknowledged(this.client, this.options, commands); - } -} - -/** - * Execute an acknowledged bulk write. - */ -async function executeAcknowledged( - client: MongoClient, - options: ClientBulkWriteOptions, - commands: ClientBulkWriteCommand[] -): Promise { - const resultsMerger = new ClientBulkWriteResultsMerger(options); - // For each command will will create and exhaust a cursor for the results. - for (const command of commands) { - const cursor = new ClientBulkWriteCursor(client, command, options); - const docs = await cursor.toArray(); - resultsMerger.merge(command.ops.documents, cursor.response, docs); - } - return resultsMerger.result; -} - -/** - * Execute an unacknowledged bulk write. 
- */ -async function executeUnacknowledged( - client: MongoClient, - options: ClientBulkWriteOptions, - commands: Document[] -): Promise<{ ok: 1 }> { - for (const command of commands) { - const operation = new ClientBulkWriteOperation(command, options); - await executeOperation(client, operation); } - return { ok: 1 }; } diff --git a/src/operations/client_bulk_write/results_merger.ts b/src/operations/client_bulk_write/results_merger.ts index 48169b93b7d..ca5f3f16048 100644 --- a/src/operations/client_bulk_write/results_merger.ts +++ b/src/operations/client_bulk_write/results_merger.ts @@ -42,11 +42,13 @@ export class ClientBulkWriteResultsMerger { /** * Merge the results in the cursor to the existing result. + * @param currentBatchOffset - The offset index to the original models. * @param response - The cursor response. * @param documents - The documents in the cursor. * @returns The current result. */ merge( + currentBatchOffset: number, operations: Document[], response: ClientBulkWriteCursorResponse, documents: Document[] @@ -67,7 +69,9 @@ export class ClientBulkWriteResultsMerger { const operation = operations[document.idx]; // Handle insert results. if ('insert' in operation) { - this.result.insertResults?.set(document.idx, { insertedId: operation.document._id }); + this.result.insertResults?.set(document.idx + currentBatchOffset, { + insertedId: operation.document._id + }); } // Handle update results. if ('update' in operation) { @@ -80,11 +84,13 @@ export class ClientBulkWriteResultsMerger { if (document.upserted) { result.upsertedId = document.upserted._id; } - this.result.updateResults?.set(document.idx, result); + this.result.updateResults?.set(document.idx + currentBatchOffset, result); } // Handle delete results. if ('delete' in operation) { - this.result.deleteResults?.set(document.idx, { deletedCount: document.n }); + this.result.deleteResults?.set(document.idx + currentBatchOffset, { + deletedCount: document.n + }); } } } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index b4450f00727..4c1d37519ad 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -513,7 +513,8 @@ function isPinnableCommand(cmd: Document, session?: ClientSession): boolean { 'find' in cmd || 'getMore' in cmd || 'listCollections' in cmd || - 'listIndexes' in cmd + 'listIndexes' in cmd || + 'bulkWrite' in cmd ); } diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index cd32f4968b6..aadf523d722 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -69,6 +69,12 @@ export class ServerDescription { setVersion: number | null; electionId: ObjectId | null; logicalSessionTimeoutMinutes: number | null; + /** The max message size in bytes for the server. */ + maxMessageSizeBytes: number | null; + /** The max number of writes in a bulk write command. */ + maxWriteBatchSize: number | null; + /** The max bson object size. */ + maxBsonObjectSize: number | null; // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level $clusterTime?: ClusterTime; @@ -111,6 +117,9 @@ export class ServerDescription { this.setVersion = hello?.setVersion ?? null; this.electionId = hello?.electionId ?? null; this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null; + this.maxMessageSizeBytes = hello?.maxMessageSizeBytes ?? null; + this.maxWriteBatchSize = hello?.maxWriteBatchSize ?? null; + this.maxBsonObjectSize = hello?.maxBsonObjectSize ?? null; this.primary = hello?.primary ?? 
null; this.me = hello?.me?.toLowerCase() ?? null; this.$clusterTime = hello?.$clusterTime ?? null; diff --git a/src/sdam/topology_description.ts b/src/sdam/topology_description.ts index 436321c7f1a..f171423f599 100644 --- a/src/sdam/topology_description.ts +++ b/src/sdam/topology_description.ts @@ -43,7 +43,6 @@ export class TopologyDescription { heartbeatFrequencyMS: number; localThresholdMS: number; commonWireVersion: number; - /** * Create a TopologyDescription */ diff --git a/test/integration/crud/crud.prose.test.ts b/test/integration/crud/crud.prose.test.ts index 3ddc126d333..1ecd960028f 100644 --- a/test/integration/crud/crud.prose.test.ts +++ b/test/integration/crud/crud.prose.test.ts @@ -3,6 +3,8 @@ import { once } from 'events'; import { type CommandStartedEvent } from '../../../mongodb'; import { + type AnyClientBulkWriteModel, + type ClientSession, type Collection, MongoBulkWriteError, type MongoClient, @@ -151,6 +153,426 @@ describe('CRUD Prose Spec Tests', () => { }); }); + describe('3. MongoClient.bulkWrite batch splits a writeModels input with greater than maxWriteBatchSize operations', function () { + // Test that MongoClient.bulkWrite properly handles writeModels inputs containing a number of writes greater than + // maxWriteBatchSize. + // This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless. + // Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents. + // Perform a hello command using client and record the maxWriteBatchSize value contained in the response. Then, + // construct the following write model (referred to as model): + // InsertOne: { + // "namespace": "db.coll", + // "document": { "a": "b" } + // } + // Construct a list of write models (referred to as models) with model repeated maxWriteBatchSize + 1 times. Execute + // bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult with an + // insertedCount value of maxWriteBatchSize + 1. + // Assert that two CommandStartedEvents (referred to as firstEvent and secondEvent) were observed for the bulkWrite + // command. Assert that the length of firstEvent.command.ops is maxWriteBatchSize. Assert that the length of + // secondEvent.command.ops is 1. If the driver exposes operationIds in its CommandStartedEvents, assert that + // firstEvent.operationId is equal to secondEvent.operationId. 
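  // For a sense of scale: on a modern server maxWriteBatchSize is typically 100,000
  // (an assumed value here; the test reads the real one from hello), so the list built
  // below holds 100,001 insert models and the expected split is one bulkWrite with
  // 100,000 ops followed by one with a single op.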
+ let client: MongoClient; + let maxWriteBatchSize; + const models: AnyClientBulkWriteModel[] = []; + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + await client.connect(); + await client.db('db').collection('coll').drop(); + const hello = await client.db('admin').command({ hello: 1 }); + maxWriteBatchSize = hello.maxWriteBatchSize; + + client.on('commandStarted', filterForCommands('bulkWrite', commands)); + commands.length = 0; + + Array.from({ length: maxWriteBatchSize + 1 }, () => { + models.push({ + namespace: 'db.coll', + name: 'insertOne', + document: { a: 'b' } + }); + }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('splits the commands into 2 operations', { + metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } }, + async test() { + const result = await client.bulkWrite(models); + expect(result.insertedCount).to.equal(maxWriteBatchSize + 1); + expect(commands.length).to.equal(2); + expect(commands[0].command.ops.length).to.equal(maxWriteBatchSize); + expect(commands[1].command.ops.length).to.equal(1); + } + }); + }); + + describe('4. MongoClient.bulkWrite batch splits when an ops payload exceeds maxMessageSizeBytes', function () { + // Test that MongoClient.bulkWrite properly handles a writeModels input which constructs an ops array larger + // than maxMessageSizeBytes. + // This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless. + // Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents. + // Perform a hello command using client and record the following values from the response: maxBsonObjectSize + // and maxMessageSizeBytes. Then, construct the following document (referred to as document): + // { + // "a": "b".repeat(maxBsonObjectSize - 500) + // } + // Construct the following write model (referred to as model): + // InsertOne: { + // "namespace": "db.coll", + // "document": document + // } + // Use the following calculation to determine the number of inserts that should be provided to + // MongoClient.bulkWrite: maxMessageSizeBytes / maxBsonObjectSize + 1 (referred to as numModels). This number + // ensures that the inserts provided to MongoClient.bulkWrite will require multiple bulkWrite commands to be + // sent to the server. + // Construct as list of write models (referred to as models) with model repeated numModels times. Then execute + // bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult with + // an insertedCount value of numModels. + // Assert that two CommandStartedEvents (referred to as firstEvent and secondEvent) were observed. Assert + // that the length of firstEvent.command.ops is numModels - 1. Assert that the length of secondEvent.command.ops + // is 1. If the driver exposes operationIds in its CommandStartedEvents, assert that firstEvent.operationId is + // equal to secondEvent.operationId. 
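To make the batch split concrete, a quick sketch of the arithmetic using commonly seen server values; both hello values here are assumptions for illustration, the test itself reads them from the server:

// Assumed hello response values, for illustration only.
const maxBsonObjectSize = 16_777_216; // 16 MiB
const maxMessageSizeBytes = 48_000_000;
const numModels = Math.floor(maxMessageSizeBytes / maxBsonObjectSize + 1); // 3
// Each insert document is roughly 16.8 MB, so three of them (about 50 MB) cannot fit
// under the 48 MB message limit; the expected split is numModels - 1 (= 2) ops in the
// first bulkWrite and 1 op in the second.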
+ let client: MongoClient; + let maxBsonObjectSize; + let maxMessageSizeBytes; + let numModels; + const models: AnyClientBulkWriteModel[] = []; + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + await client.connect(); + await client.db('db').collection('coll').drop(); + const hello = await client.db('admin').command({ hello: 1 }); + maxBsonObjectSize = hello.maxBsonObjectSize; + maxMessageSizeBytes = hello.maxMessageSizeBytes; + numModels = Math.floor(maxMessageSizeBytes / maxBsonObjectSize + 1); + + client.on('commandStarted', filterForCommands('bulkWrite', commands)); + commands.length = 0; + + Array.from({ length: numModels }, () => { + models.push({ + name: 'insertOne', + namespace: 'db.coll', + document: { + a: 'b'.repeat(maxBsonObjectSize - 500) + } + }); + }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('splits the commands into 2 operations', { + metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } }, + async test() { + const result = await client.bulkWrite(models); + expect(result.insertedCount).to.equal(numModels); + expect(commands.length).to.equal(2); + expect(commands[0].command.ops.length).to.equal(numModels - 1); + expect(commands[1].command.ops.length).to.equal(1); + } + }); + }); + + describe('7. MongoClient.bulkWrite handles a cursor requiring a getMore', function () { + // Test that MongoClient.bulkWrite properly iterates the results cursor when getMore is required. + // This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless. + // Construct a MongoClient (referred to as client) with command monitoring enabled to observe + // CommandStartedEvents. Perform a hello command using client and record the maxBsonObjectSize value from the response. + // Construct a MongoCollection (referred to as collection) with the namespace "db.coll" (referred to as namespace). + // Drop collection. Then create the following list of write models (referred to as models): + // UpdateOne { + // "namespace": namespace, + // "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) }, + // "update": { "$set": { "x": 1 } }, + // "upsert": true + // }, + // UpdateOne { + // "namespace": namespace, + // "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) }, + // "update": { "$set": { "x": 1 } }, + // "upsert": true + // }, + // Execute bulkWrite on client with models and verboseResults set to true. Assert that the bulk write succeeds and returns a BulkWriteResult (referred to as result). + // Assert that result.upsertedCount is equal to 2. + // Assert that the length of result.updateResults is equal to 2. + // Assert that a CommandStartedEvent was observed for the getMore command. 
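  // Why this forces a getMore, under the assumption of a typical 16 MiB maxBsonObjectSize:
  // each upsert filter uses an _id of roughly 8 MB, and with verboseResults the server
  // echoes those _ids back in the individual result documents, so the two results together
  // cannot fit in a single cursor batch and the driver must issue a getMore to finish
  // iterating.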
+ let client: MongoClient; + let maxBsonObjectSize; + const models: AnyClientBulkWriteModel[] = []; + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + await client.connect(); + await client.db('db').collection('coll').drop(); + const hello = await client.db('admin').command({ hello: 1 }); + maxBsonObjectSize = hello.maxBsonObjectSize; + + client.on('commandStarted', filterForCommands('getMore', commands)); + commands.length = 0; + + models.push({ + name: 'updateOne', + namespace: 'db.coll', + filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) }, + update: { $set: { x: 1 } }, + upsert: true + }); + models.push({ + name: 'updateOne', + namespace: 'db.coll', + filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) }, + update: { $set: { x: 1 } }, + upsert: true + }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('handles a getMore on the results', { + metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } }, + async test() { + const result = await client.bulkWrite(models, { verboseResults: true }); + expect(result.upsertedCount).to.equal(2); + expect(result.updateResults.size).to.equal(2); + expect(commands.length).to.equal(1); + } + }); + }); + + describe('8. MongoClient.bulkWrite handles a cursor requiring getMore within a transaction', function () { + // Test that MongoClient.bulkWrite executed within a transaction properly iterates the results + // cursor when getMore is required. + // This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless. + // This test must not be run against standalone servers. + // Construct a MongoClient (referred to as client) with command monitoring enabled to observe + // CommandStartedEvents. Perform a hello command using client and record the maxBsonObjectSize value from the response. + // Construct a MongoCollection (referred to as collection) with the namespace "db.coll" (referred to as namespace). Drop collection. + // Start a session on client (referred to as session). Start a transaction on session. + // Create the following list of write models (referred to as models): + // UpdateOne { + // "namespace": namespace, + // "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) }, + // "update": { "$set": { "x": 1 } }, + // "upsert": true + // }, + // UpdateOne { + // "namespace": namespace, + // "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) }, + // "update": { "$set": { "x": 1 } }, + // "upsert": true + // }, + // Execute bulkWrite on client with models, session, and verboseResults set to true. Assert that the bulk + // write succeeds and returns a BulkWriteResult (referred to as result). + // Assert that result.upsertedCount is equal to 2. + // Assert that the length of result.updateResults is equal to 2. + // Assert that a CommandStartedEvent was observed for the getMore command. 
+ let client: MongoClient; + let session: ClientSession; + let maxBsonObjectSize; + const models: AnyClientBulkWriteModel[] = []; + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + await client.connect(); + await client.db('db').collection('coll').drop(); + const hello = await client.db('admin').command({ hello: 1 }); + maxBsonObjectSize = hello.maxBsonObjectSize; + + client.on('commandStarted', filterForCommands('getMore', commands)); + commands.length = 0; + + models.push({ + name: 'updateOne', + namespace: 'db.coll', + filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) }, + update: { $set: { x: 1 } }, + upsert: true + }); + models.push({ + name: 'updateOne', + namespace: 'db.coll', + filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) }, + update: { $set: { x: 1 } }, + upsert: true + }); + + session = client.startSession(); + session.startTransaction(); + }); + + afterEach(async function () { + await session.endSession(); + await client.close(); + }); + + it('handles a getMore on the results in a transaction', { + metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid', topology: '!single' } }, + async test() { + const result = await client.bulkWrite(models, { verboseResults: true, session }); + expect(result.upsertedCount).to.equal(2); + expect(result.updateResults.size).to.equal(2); + expect(commands.length).to.equal(1); + } + }); + }); + + describe('11. MongoClient.bulkWrite batch splits when the addition of a new namespace exceeds the maximum message size', function () { + // Test that MongoClient.bulkWrite batch splits a bulk write when the addition of a new namespace to nsInfo causes the size + // of the message to exceed maxMessageSizeBytes - 1000. + // This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless. + // Repeat the following setup for each test case: + // Setup + // Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents. Perform + // a hello command using client and record the following values from the response: maxBsonObjectSize and maxMessageSizeBytes. + // Calculate the following values: + // opsBytes = maxMessageSizeBytes - 1122 + // numModels = opsBytes / maxBsonObjectSize + // remainderBytes = opsBytes % maxBsonObjectSize + // Construct the following write model (referred to as firstModel): + // InsertOne { + // "namespace": "db.coll", + // "document": { "a": "b".repeat(maxBsonObjectSize - 57) } + // } + // Create a list of write models (referred to as models) with firstModel repeated numModels times. 
+ // If remainderBytes is greater than or equal to 217, add 1 to numModels and append the following write model to models: + // InsertOne { + // "namespace": "db.coll", + // "document": { "a": "b".repeat(remainderBytes - 57) } + // } + // Then perform the following two tests: + let client: MongoClient; + let maxBsonObjectSize; + let maxMessageSizeBytes; + let opsBytes; + let numModels; + let remainderBytes; + let models: AnyClientBulkWriteModel[] = []; + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + await client.connect(); + await client.db('db').collection('coll').drop(); + const hello = await client.db('admin').command({ hello: 1 }); + maxBsonObjectSize = hello.maxBsonObjectSize; + maxMessageSizeBytes = hello.maxMessageSizeBytes; + opsBytes = maxMessageSizeBytes - 1122; + numModels = Math.floor(opsBytes / maxBsonObjectSize); + remainderBytes = opsBytes % maxBsonObjectSize; + + client.on('commandStarted', filterForCommands('bulkWrite', commands)); + commands.length = 0; + models = []; + + Array.from({ length: numModels }, () => { + models.push({ + namespace: 'db.coll', + name: 'insertOne', + document: { a: 'b'.repeat(maxBsonObjectSize - 57) } + }); + }); + + if (remainderBytes >= 217) { + numModels++; + models.push({ + namespace: 'db.coll', + name: 'insertOne', + document: { a: 'b'.repeat(remainderBytes - 57) } + }); + } + }); + + afterEach(async function () { + await client.close(); + }); + + context('when no batch splitting is required', function () { + // Case 1: No batch-splitting required + // Create the following write model (referred to as sameNamespaceModel): + // InsertOne { + // "namespace": "db.coll", + // "document": { "a": "b" } + // } + // Append sameNamespaceModel to models. + // Execute bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult (referred to as result). + // Assert that result.insertedCount is equal to numModels + 1. + // Assert that one CommandStartedEvent was observed for the bulkWrite command (referred to as event). + // Assert that the length of event.command.ops is numModels + 1. Assert that the length of event.command.nsInfo is 1. + // Assert that the namespace contained in event.command.nsInfo is "db.coll". + it('executes in a single batch', { + metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } }, + async test() { + const sameNamespaceModel: AnyClientBulkWriteModel = { + name: 'insertOne', + namespace: 'db.coll', + document: { a: 'b' } + }; + const testModels = models.concat([sameNamespaceModel]); + const result = await client.bulkWrite(testModels); + expect(result.insertedCount).to.equal(numModels + 1); + expect(commands.length).to.equal(1); + expect(commands[0].command.ops.length).to.equal(numModels + 1); + expect(commands[0].command.nsInfo.length).to.equal(1); + expect(commands[0].command.nsInfo[0].ns).to.equal('db.coll'); + } + }); + }); + + context('when batch splitting is required', function () { + // Case 2: Batch-splitting required + // Construct the following namespace (referred to as namespace): + // "db." + "c".repeat(200) + // Create the following write model (referred to as newNamespaceModel): + // InsertOne { + // "namespace": namespace, + // "document": { "a": "b" } + // } + // Append newNamespaceModel to models. + // Execute bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult (referred to as result). 
+ // Assert that result.insertedCount is equal to numModels + 1. + // Assert that two CommandStartedEvents were observed for the bulkWrite command (referred to as firstEvent and secondEvent). + // Assert that the length of firstEvent.command.ops is equal to numModels. Assert that the length of firstEvent.command.nsInfo + // is equal to 1. Assert that the namespace contained in firstEvent.command.nsInfo is "db.coll". + // Assert that the length of secondEvent.command.ops is equal to 1. Assert that the length of secondEvent.command.nsInfo + // is equal to 1. Assert that the namespace contained in secondEvent.command.nsInfo is namespace. + it('executes in multiple batches', { + metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } }, + async test() { + const namespace = `db.${'c'.repeat(200)}`; + const newNamespaceModel: AnyClientBulkWriteModel = { + name: 'insertOne', + namespace: namespace, + document: { a: 'b' } + }; + const testModels = models.concat([newNamespaceModel]); + const result = await client.bulkWrite(testModels); + expect(result.insertedCount).to.equal(numModels + 1); + expect(commands.length).to.equal(2); + expect(commands[0].command.ops.length).to.equal(numModels); + expect(commands[0].command.nsInfo.length).to.equal(1); + expect(commands[0].command.nsInfo[0].ns).to.equal('db.coll'); + expect(commands[1].command.ops.length).to.equal(1); + expect(commands[1].command.nsInfo.length).to.equal(1); + expect(commands[1].command.nsInfo[0].ns).to.equal(namespace); + } + }); + }); + }); + describe('14. `explain` helpers allow users to specify `maxTimeMS`', function () { let client: MongoClient; const commands: CommandStartedEvent[] = []; diff --git a/test/integration/crud/insert.test.js b/test/integration/crud/insert.test.js index f154827d92a..c7c212d91d9 100644 --- a/test/integration/crud/insert.test.js +++ b/test/integration/crud/insert.test.js @@ -1,4 +1,5 @@ 'use strict'; +const semver = require('semver'); const { assert: test, ignoreNsNotFound, setupDatabase } = require('../shared'); const { format: f } = require('util'); const { expect } = require('chai'); @@ -1680,6 +1681,11 @@ describe('crud - insert', function () { }, test: function (done) { + if (semver.satisfies(process.versions.node, '22.7.0')) { + this.skipReason = 'Node.js 22.7.0 has a UTF-8 encoding bug'; + this.skip(); + } + var regexp = /foobaré/; var configuration = this.configuration; diff --git a/test/integration/crud/unicode.test.js b/test/integration/crud/unicode.test.js index b4332e4afc4..ab39df20fb0 100644 --- a/test/integration/crud/unicode.test.js +++ b/test/integration/crud/unicode.test.js @@ -1,4 +1,5 @@ 'use strict'; +const semver = require('semver'); const { assert: test, setupDatabase } = require('../shared'); const { expect } = require('chai'); @@ -13,6 +14,11 @@ describe('Unicode', function () { }, test: function (done) { + if (semver.satisfies(process.versions.node, '22.7.0')) { + this.skipReason = 'Node.js 22.7.0 has a UTF-8 encoding bug'; + this.skip(); + } + var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 }); client.connect(function (err, client) { diff --git a/test/spec/crud/unified/aggregate-write-readPreference.json b/test/spec/crud/unified/aggregate-write-readPreference.json index bc887e83cbc..c1fa3b4574a 100644 --- a/test/spec/crud/unified/aggregate-write-readPreference.json +++ b/test/spec/crud/unified/aggregate-write-readPreference.json @@ -78,11 +78,6 @@ "x": 33 } ] - }, - { - "collectionName": 
"coll1", - "databaseName": "db0", - "documents": [] } ], "tests": [ @@ -159,22 +154,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -250,22 +229,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -344,22 +307,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -438,22 +385,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] } ] diff --git a/test/spec/crud/unified/aggregate-write-readPreference.yml b/test/spec/crud/unified/aggregate-write-readPreference.yml index 86f5a4399c7..16f1035752d 100644 --- a/test/spec/crud/unified/aggregate-write-readPreference.yml +++ b/test/spec/crud/unified/aggregate-write-readPreference.yml @@ -51,9 +51,6 @@ initialData: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } - - collectionName: *collection1Name - databaseName: *database0Name - documents: [] tests: - description: "Aggregate with $out includes read preference for 5.0+ server" @@ -78,12 +75,6 @@ tests: $readPreference: *readPreference readConcern: *readConcern writeConcern: *writeConcern - outcome: &outcome - - collectionName: *collection1Name - databaseName: *database0Name - documents: - - { _id: 2, x: 22 } - - { _id: 3, x: 33 } - description: "Aggregate with $out omits read preference for pre-5.0 server" runOnRequirements: @@ -108,7 +99,6 @@ tests: $readPreference: { $$exists: false } readConcern: *readConcern writeConcern: *writeConcern - outcome: *outcome - description: "Aggregate with $merge includes read preference for 5.0+ server" runOnRequirements: @@ -131,7 +121,6 @@ tests: $readPreference: *readPreference readConcern: *readConcern writeConcern: *writeConcern - outcome: *outcome - description: "Aggregate with $merge omits read preference for pre-5.0 server" runOnRequirements: @@ -152,4 +141,3 @@ tests: $readPreference: { $$exists: false } readConcern: *readConcern writeConcern: *writeConcern - outcome: *outcome diff --git a/test/spec/crud/unified/db-aggregate-write-readPreference.json b/test/spec/crud/unified/db-aggregate-write-readPreference.json index 2a81282de81..b6460f001f2 100644 --- a/test/spec/crud/unified/db-aggregate-write-readPreference.json +++ b/test/spec/crud/unified/db-aggregate-write-readPreference.json @@ -52,13 +52,6 @@ } } ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [] - } - ], "tests": [ { "description": "Database-level aggregate with $out includes read preference for 5.0+ server", @@ -141,17 +134,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -235,17 +217,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -332,17 +303,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -429,17 +389,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] } ] 
diff --git a/test/spec/crud/unified/db-aggregate-write-readPreference.yml b/test/spec/crud/unified/db-aggregate-write-readPreference.yml index 04a3b2169f5..03fcd35aa35 100644 --- a/test/spec/crud/unified/db-aggregate-write-readPreference.yml +++ b/test/spec/crud/unified/db-aggregate-write-readPreference.yml @@ -43,11 +43,6 @@ createEntities: database: *database0 collectionName: &collection0Name coll0 -initialData: - - collectionName: *collection0Name - databaseName: *database0Name - documents: [] - tests: - description: "Database-level aggregate with $out includes read preference for 5.0+ server" runOnRequirements: @@ -73,11 +68,6 @@ tests: $readPreference: *readPreference readConcern: *readConcern writeConcern: *writeConcern - outcome: &outcome - - collectionName: *collection0Name - databaseName: *database0Name - documents: - - { _id: 1 } - description: "Database-level aggregate with $out omits read preference for pre-5.0 server" runOnRequirements: @@ -102,7 +92,6 @@ tests: $readPreference: { $$exists: false } readConcern: *readConcern writeConcern: *writeConcern - outcome: *outcome - description: "Database-level aggregate with $merge includes read preference for 5.0+ server" runOnRequirements: @@ -127,7 +116,6 @@ tests: $readPreference: *readPreference readConcern: *readConcern writeConcern: *writeConcern - outcome: *outcome - description: "Database-level aggregate with $merge omits read preference for pre-5.0 server" runOnRequirements: @@ -148,4 +136,3 @@ tests: $readPreference: { $$exists: false } readConcern: *readConcern writeConcern: *writeConcern - outcome: *outcome diff --git a/test/unit/cmap/commands.test.ts b/test/unit/cmap/commands.test.ts index f4b3fdf0252..5725f5b2490 100644 --- a/test/unit/cmap/commands.test.ts +++ b/test/unit/cmap/commands.test.ts @@ -15,7 +15,7 @@ describe('commands', function () { context('when there is one document sequence', function () { const command = { test: 1, - field: new DocumentSequence([{ test: 1 }]) + field: new DocumentSequence('field', [{ test: 1 }]) }; const msg = new OpMsgRequest('admin', command, {}); const buffers = msg.toBin(); @@ -53,8 +53,8 @@ describe('commands', function () { context('when there are multiple document sequences', function () { const command = { test: 1, - fieldOne: new DocumentSequence([{ test: 1 }]), - fieldTwo: new DocumentSequence([{ test: 1 }]) + fieldOne: new DocumentSequence('fieldOne', [{ test: 1 }]), + fieldTwo: new DocumentSequence('fieldTwo', [{ test: 1 }]) }; const msg = new OpMsgRequest('admin', command, {}); const buffers = msg.toBin(); diff --git a/test/unit/cmap/connection.test.ts b/test/unit/cmap/connection.test.ts index 75d5c246f24..05e66f3dcfc 100644 --- a/test/unit/cmap/connection.test.ts +++ b/test/unit/cmap/connection.test.ts @@ -1,11 +1,15 @@ +import { Socket } from 'node:net'; + import { expect } from 'chai'; import * as sinon from 'sinon'; +import { setTimeout } from 'timers/promises'; import { connect, Connection, isHello, MongoClientAuthProviders, + MongoDBCollectionNamespace, MongoNetworkTimeoutError, ns } from '../../mongodb'; @@ -142,4 +146,181 @@ describe('new Connection()', function () { expect(beforeHandshakeSymbol).to.be.a('symbol'); expect(error).to.have.property(beforeHandshakeSymbol, true); }); + + describe('NODE-6370: regression test', function () { + class MockSocket extends Socket { + override write(_data: string | Buffer) { + return false; + } + } + + let socket: MockSocket; + let connection: Connection; + + this.timeout(10_000); + + beforeEach(function () { + socket = new 
MockSocket(); + connection = new Connection(socket, {}); + }); + + const validResponse = Buffer.from( + 'a30000002a0800004b010000dd07000000000000008e000000016f6b00000000000000f03f0324636c757374657254696d65005800000011636c757374657254696d65001c00000093f6f266037369676e61747572650033000000056861736800140000000072d8d6eab4e0703d2d50846e2db7adb5d2733cc4126b65794964000200000026f6f2660000116f7065726174696f6e54696d65001c00000093f6f26600', + 'hex' + ); + + const chunks = [validResponse.slice(0, 10), validResponse.slice(10)]; + + describe('when data is emitted before drain', function () { + describe('first command', function () { + describe('when there is no delay between data and drain', function () { + it('does not hang', async function () { + const result$ = connection.command( + MongoDBCollectionNamespace.fromString('foo.bar'), + { ping: 1 }, + {} + ); + // there is an await in writeCommand, we must move the event loop forward just enough + // so that we reach the `await drain`. Otherwise, we'll emit both data and drain before + // listeners are attached. + await setTimeout(0); + + socket.emit('data', validResponse); + socket.emit('drain'); + + await result$; + }); + }); + + describe('when there is a delay between data and drain', function () { + it('does not hang', async function () { + const result$ = connection.command( + MongoDBCollectionNamespace.fromString('foo.bar'), + { ping: 1 }, + {} + ); + + // there is an await in writeCommand, we must move the event loop forward just enough + // so that we reach the `await drain`. Otherwise, we'll emit both data and drain before + // listeners are attached. + await setTimeout(0); + socket.emit('data', validResponse); + + await setTimeout(10); + + socket.emit('drain'); + await result$; + }); + }); + + describe('when the data comes in multiple chunks', function () { + it('does not hang', async function () { + const result$ = connection.command( + MongoDBCollectionNamespace.fromString('foo.bar'), + { ping: 1 }, + {} + ); + + // there is an await in writeCommand, we must move the event loop forward just enough + // so that we reach the `await drain`. Otherwise, we'll emit both data and drain before + // listeners are attached. + await setTimeout(0); + socket.emit('data', chunks[0]); + + await setTimeout(10); + socket.emit('drain'); + + socket.emit('data', chunks[1]); + + await result$; + }); + }); + }); + + describe('not first command', function () { + beforeEach(async function () { + const result$ = connection.command( + MongoDBCollectionNamespace.fromString('foo.bar'), + { ping: 1 }, + {} + ); + + // there is an await in writeCommand, we must move the event loop forward just enough + // so that we reach the `await drain`. Otherwise, we'll emit both data and drain before + // listeners are attached. + await setTimeout(0); + socket.emit('drain'); + socket.emit('data', validResponse); + + await result$; + }); + + describe('when there is no delay between data and drain', function () { + it('does not hang', async function () { + const result$ = connection.command( + MongoDBCollectionNamespace.fromString('foo.bar'), + { ping: 1 }, + {} + ); + + // there is an await in writeCommand, we must move the event loop forward just enough + // so that we reach the `await drain`. Otherwise, we'll emit both data and drain before + // listeners are attached. 
+ await setTimeout(0); + socket.emit('data', validResponse); + + // await setTimeout(0); + // await setTimeout(10); + socket.emit('drain'); + await result$; + }); + }); + + describe('when there is a delay between data and drain', function () { + it('does not hang', async function () { + const result$ = connection.command( + MongoDBCollectionNamespace.fromString('foo.bar'), + { ping: 1 }, + {} + ); + + // there is an await in writeCommand, we must move the event loop forward just enough + // so that we reach the `await drain`. Otherwise, we'll emit both data and drain before + // listeners are attached. + await setTimeout(0); + socket.emit('data', validResponse); + + await setTimeout(10); + // await setTimeout(10); + socket.emit('drain'); + await result$; + }); + }); + + describe('when the data comes in multiple chunks', function () { + it('does not hang', async function () { + const result$ = connection.command( + MongoDBCollectionNamespace.fromString('foo.bar'), + { ping: 1 }, + {} + ); + + // there is an await in writeCommand, we must move the event loop forward just enough + // so that we reach the `await drain`. Otherwise, we'll emit both data and drain before + // listeners are attached. + await setTimeout(0); + + socket.emit('data', chunks[0]); + + await setTimeout(10); + + socket.emit('drain'); + + socket.emit('data', chunks[1]); + await result$; + }); + }); + }); + }); + }); }); diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index c8a1406a000..883cc4b4ba7 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -69,11 +69,12 @@ const EXPECTED_EXPORTS = [ 'MongoAWSError', 'MongoAzureError', 'MongoBatchReExecutionError', - 'MongoBulkWriteCursorError', 'MongoBulkWriteError', 'MongoChangeStreamError', 'MongoClient', 'MongoClientAuthProviders', + 'MongoClientBulkWriteCursorError', + 'MongoClientBulkWriteExecutionError', 'MongoCompatibilityError', 'MongoCryptAzureKMSRequestError', 'MongoCryptCreateDataKeyError', diff --git a/test/unit/operations/client_bulk_write/command_builder.test.ts b/test/unit/operations/client_bulk_write/command_builder.test.ts index 6b34ef9a817..e92966795b3 100644 --- a/test/unit/operations/client_bulk_write/command_builder.test.ts +++ b/test/unit/operations/client_bulk_write/command_builder.test.ts @@ -20,7 +20,7 @@ import { } from '../../../mongodb'; describe('ClientBulkWriteCommandBuilder', function () { - describe('#buildCommand', function () { + describe('#buildBatch', function () { context('when custom options are provided', function () { const id = new ObjectId(); const model: ClientInsertOneModel = { @@ -34,39 +34,39 @@ describe('ClientBulkWriteCommandBuilder', function () { ordered: false, comment: { bulk: 'write' } }); - const commands = builder.buildCommands(); + const command = builder.buildBatch(48000000, 100000); it('sets the bulkWrite command', function () { - expect(commands[0].bulkWrite).to.equal(1); + expect(command.bulkWrite).to.equal(1); }); it('sets the errorsOnly field to the inverse of verboseResults', function () { - expect(commands[0].errorsOnly).to.be.false; + expect(command.errorsOnly).to.be.false; }); it('sets the ordered field', function () { - expect(commands[0].ordered).to.be.false; + expect(command.ordered).to.be.false; }); it('sets the bypassDocumentValidation field', function () { - expect(commands[0].bypassDocumentValidation).to.be.true; + expect(command.bypassDocumentValidation).to.be.true; }); it('sets the ops document sequence', function () { - 
expect(commands[0].ops).to.be.instanceOf(DocumentSequence); - expect(commands[0].ops.documents[0]).to.deep.equal({ + expect(command.ops).to.be.instanceOf(DocumentSequence); + expect(command.ops.documents[0]).to.deep.equal({ insert: 0, document: { _id: id, name: 1 } }); }); it('sets the nsInfo document sequence', function () { - expect(commands[0].nsInfo).to.be.instanceOf(DocumentSequence); - expect(commands[0].nsInfo.documents[0]).to.deep.equal({ ns: 'test.coll' }); + expect(command.nsInfo).to.be.instanceOf(DocumentSequence); + expect(command.nsInfo.documents[0]).to.deep.equal({ ns: 'test.coll' }); }); it('passes comment options into the commands', function () { - expect(commands[0].comment).to.deep.equal({ bulk: 'write' }); + expect(command.comment).to.deep.equal({ bulk: 'write' }); }); }); @@ -79,35 +79,89 @@ describe('ClientBulkWriteCommandBuilder', function () { document: { _id: id, name: 1 } }; const builder = new ClientBulkWriteCommandBuilder([model], {}); - const commands = builder.buildCommands(); + const command = builder.buildBatch(48000000, 100000); it('sets the bulkWrite command', function () { - expect(commands[0].bulkWrite).to.equal(1); + expect(command.bulkWrite).to.equal(1); }); it('sets the default errorsOnly field', function () { - expect(commands[0].errorsOnly).to.be.true; + expect(command.errorsOnly).to.be.true; }); it('sets the default ordered field', function () { - expect(commands[0].ordered).to.be.true; + expect(command.ordered).to.be.true; }); it('sets the ops document sequence', function () { - expect(commands[0].ops).to.be.instanceOf(DocumentSequence); - expect(commands[0].ops.documents[0]).to.deep.equal({ + expect(command.ops).to.be.instanceOf(DocumentSequence); + expect(command.ops.documents[0]).to.deep.equal({ insert: 0, document: { _id: id, name: 1 } }); }); it('sets the nsInfo document sequence', function () { - expect(commands[0].nsInfo).to.be.instanceOf(DocumentSequence); - expect(commands[0].nsInfo.documents[0]).to.deep.equal({ ns: 'test.coll' }); + expect(command.nsInfo).to.be.instanceOf(DocumentSequence); + expect(command.nsInfo.documents[0]).to.deep.equal({ ns: 'test.coll' }); }); }); context('when multiple models are provided', function () { + context('when exceeding the max batch size', function () { + const idOne = new ObjectId(); + const idTwo = new ObjectId(); + const modelOne: ClientInsertOneModel = { + name: 'insertOne', + namespace: 'test.coll', + document: { _id: idOne, name: 1 } + }; + const modelTwo: ClientInsertOneModel = { + name: 'insertOne', + namespace: 'test.coll', + document: { _id: idTwo, name: 2 } + }; + const builder = new ClientBulkWriteCommandBuilder([modelOne, modelTwo], {}); + const commandOne = builder.buildBatch(48000000, 1); + const commandTwo = builder.buildBatch(48000000, 1); + + it('splits the operations into multiple commands', function () { + expect(commandOne.ops.documents).to.deep.equal([ + { insert: 0, document: { _id: idOne, name: 1 } } + ]); + expect(commandTwo.ops.documents).to.deep.equal([ + { insert: 0, document: { _id: idTwo, name: 2 } } + ]); + }); + }); + + context('when exceeding the max message size in bytes', function () { + const idOne = new ObjectId(); + const idTwo = new ObjectId(); + const modelOne: ClientInsertOneModel = { + name: 'insertOne', + namespace: 'test.coll', + document: { _id: idOne, name: 1 } + }; + const modelTwo: ClientInsertOneModel = { + name: 'insertOne', + namespace: 'test.coll', + document: { _id: idTwo, name: 2 } + }; + const builder = new ClientBulkWriteCommandBuilder([modelOne, 
modelTwo], {}); + const commandOne = builder.buildBatch(1090, 100000); + const commandTwo = builder.buildBatch(1090, 100000); + + it('splits the operations into multiple commands', function () { + expect(commandOne.ops.documents).to.deep.equal([ + { insert: 0, document: { _id: idOne, name: 1 } } + ]); + expect(commandTwo.ops.documents).to.deep.equal([ + { insert: 0, document: { _id: idTwo, name: 2 } } + ]); + }); + }); + context('when the namespace is the same', function () { const idOne = new ObjectId(); const idTwo = new ObjectId(); @@ -122,23 +176,23 @@ describe('ClientBulkWriteCommandBuilder', function () { document: { _id: idTwo, name: 2 } }; const builder = new ClientBulkWriteCommandBuilder([modelOne, modelTwo], {}); - const commands = builder.buildCommands(); + const command = builder.buildBatch(48000000, 100000); it('sets the bulkWrite command', function () { - expect(commands[0].bulkWrite).to.equal(1); + expect(command.bulkWrite).to.equal(1); }); it('sets the ops document sequence', function () { - expect(commands[0].ops).to.be.instanceOf(DocumentSequence); - expect(commands[0].ops.documents).to.deep.equal([ + expect(command.ops).to.be.instanceOf(DocumentSequence); + expect(command.ops.documents).to.deep.equal([ { insert: 0, document: { _id: idOne, name: 1 } }, { insert: 0, document: { _id: idTwo, name: 2 } } ]); }); it('sets the nsInfo document sequence', function () { - expect(commands[0].nsInfo).to.be.instanceOf(DocumentSequence); - expect(commands[0].nsInfo.documents).to.deep.equal([{ ns: 'test.coll' }]); + expect(command.nsInfo).to.be.instanceOf(DocumentSequence); + expect(command.nsInfo.documents).to.deep.equal([{ ns: 'test.coll' }]); }); }); @@ -156,23 +210,23 @@ describe('ClientBulkWriteCommandBuilder', function () { document: { _id: idTwo, name: 2 } }; const builder = new ClientBulkWriteCommandBuilder([modelOne, modelTwo], {}); - const commands = builder.buildCommands(); + const command = builder.buildBatch(48000000, 100000); it('sets the bulkWrite command', function () { - expect(commands[0].bulkWrite).to.equal(1); + expect(command.bulkWrite).to.equal(1); }); it('sets the ops document sequence', function () { - expect(commands[0].ops).to.be.instanceOf(DocumentSequence); - expect(commands[0].ops.documents).to.deep.equal([ + expect(command.ops).to.be.instanceOf(DocumentSequence); + expect(command.ops.documents).to.deep.equal([ { insert: 0, document: { _id: idOne, name: 1 } }, { insert: 1, document: { _id: idTwo, name: 2 } } ]); }); it('sets the nsInfo document sequence', function () { - expect(commands[0].nsInfo).to.be.instanceOf(DocumentSequence); - expect(commands[0].nsInfo.documents).to.deep.equal([ + expect(command.nsInfo).to.be.instanceOf(DocumentSequence); + expect(command.nsInfo.documents).to.deep.equal([ { ns: 'test.coll' }, { ns: 'test.coll2' } ]); @@ -199,15 +253,15 @@ describe('ClientBulkWriteCommandBuilder', function () { document: { _id: idThree, name: 2 } }; const builder = new ClientBulkWriteCommandBuilder([modelOne, modelTwo, modelThree], {}); - const commands = builder.buildCommands(); + const command = builder.buildBatch(48000000, 100000); it('sets the bulkWrite command', function () { - expect(commands[0].bulkWrite).to.equal(1); + expect(command.bulkWrite).to.equal(1); }); it('sets the ops document sequence', function () { - expect(commands[0].ops).to.be.instanceOf(DocumentSequence); - expect(commands[0].ops.documents).to.deep.equal([ + expect(command.ops).to.be.instanceOf(DocumentSequence); + expect(command.ops.documents).to.deep.equal([ { insert: 0, 
document: { _id: idOne, name: 1 } }, { insert: 1, document: { _id: idTwo, name: 2 } }, { insert: 0, document: { _id: idThree, name: 2 } } @@ -215,8 +269,8 @@ describe('ClientBulkWriteCommandBuilder', function () { }); it('sets the nsInfo document sequence', function () { - expect(commands[0].nsInfo).to.be.instanceOf(DocumentSequence); - expect(commands[0].nsInfo.documents).to.deep.equal([ + expect(command.nsInfo).to.be.instanceOf(DocumentSequence); + expect(command.nsInfo.documents).to.deep.equal([ { ns: 'test.coll' }, { ns: 'test.coll2' } ]); diff --git a/test/unit/operations/client_bulk_write/results_merger.test.ts b/test/unit/operations/client_bulk_write/results_merger.test.ts index ec43843af65..342502eebb4 100644 --- a/test/unit/operations/client_bulk_write/results_merger.test.ts +++ b/test/unit/operations/client_bulk_write/results_merger.test.ts @@ -28,180 +28,282 @@ describe('ClientBulkWriteResultsMerger', function () { describe('#merge', function () { context('when the bulk write is acknowledged', function () { - context('when requesting verbose results', function () { - // An example verbose response from the server without errors: - // { - // cursor: { - // id: Long('0'), - // firstBatch: [ { ok: 1, idx: 0, n: 1 }, { ok: 1, idx: 1, n: 1 } ], - // ns: 'admin.$cmd.bulkWrite' - // }, - // nErrors: 0, - // nInserted: 2, - // nMatched: 0, - // nModified: 0, - // nUpserted: 0, - // nDeleted: 0, - // ok: 1 - // } - context('when there are no errors', function () { - const operations = [ - { insert: 0, document: { _id: 1 } }, - { update: 0 }, - { update: 0 }, - { delete: 0 } - ]; - const documents = [ - { ok: 1, idx: 0, n: 1 }, // Insert - { ok: 1, idx: 1, n: 1, nModified: 1 }, // Update match - { ok: 1, idx: 2, n: 0, upserted: { _id: 1 } }, // Update no match with upsert - { ok: 1, idx: 3, n: 1 } // Delete - ]; - const serverResponse = { - cursor: { - id: new Long('0'), - firstBatch: documents, - ns: 'admin.$cmd.bulkWrite' - }, - nErrors: 0, - nInserted: 1, - nMatched: 1, - nModified: 1, - nUpserted: 1, - nDeleted: 1, - ok: 1 - }; - const response = new ClientBulkWriteCursorResponse(BSON.serialize(serverResponse), 0); - const merger = new ClientBulkWriteResultsMerger({ verboseResults: true }); - let result: ClientBulkWriteResult; - - before(function () { - result = merger.merge(operations, response, documents); - }); + context('when merging on the first batch', function () { + context('when requesting verbose results', function () { + // An example verbose response from the server without errors: + // { + // cursor: { + // id: Long('0'), + // firstBatch: [ { ok: 1, idx: 0, n: 1 }, { ok: 1, idx: 1, n: 1 } ], + // ns: 'admin.$cmd.bulkWrite' + // }, + // nErrors: 0, + // nInserted: 2, + // nMatched: 0, + // nModified: 0, + // nUpserted: 0, + // nDeleted: 0, + // ok: 1 + // } + context('when there are no errors', function () { + const operations = [ + { insert: 0, document: { _id: 1 } }, + { update: 0 }, + { update: 0 }, + { delete: 0 } + ]; + const documents = [ + { ok: 1, idx: 0, n: 1 }, // Insert + { ok: 1, idx: 1, n: 1, nModified: 1 }, // Update match + { ok: 1, idx: 2, n: 0, upserted: { _id: 1 } }, // Update no match with upsert + { ok: 1, idx: 3, n: 1 } // Delete + ]; + const serverResponse = { + cursor: { + id: new Long('0'), + firstBatch: documents, + ns: 'admin.$cmd.bulkWrite' + }, + nErrors: 0, + nInserted: 1, + nMatched: 1, + nModified: 1, + nUpserted: 1, + nDeleted: 1, + ok: 1 + }; + const response = new ClientBulkWriteCursorResponse(BSON.serialize(serverResponse), 0); + const 
merger = new ClientBulkWriteResultsMerger({ verboseResults: true }); + let result: ClientBulkWriteResult; - it('merges the inserted count', function () { - expect(result.insertedCount).to.equal(1); - }); + before(function () { + result = merger.merge(0, operations, response, documents); + }); - it('sets insert results', function () { - expect(result.insertResults.get(0).insertedId).to.equal(1); - }); + it('merges the inserted count', function () { + expect(result.insertedCount).to.equal(1); + }); - it('merges the upserted count', function () { - expect(result.upsertedCount).to.equal(1); - }); + it('sets insert results', function () { + expect(result.insertResults.get(0).insertedId).to.equal(1); + }); - it('merges the matched count', function () { - expect(result.matchedCount).to.equal(1); - }); + it('merges the upserted count', function () { + expect(result.upsertedCount).to.equal(1); + }); - it('merges the modified count', function () { - expect(result.modifiedCount).to.equal(1); - }); + it('merges the matched count', function () { + expect(result.matchedCount).to.equal(1); + }); - it('sets the update results', function () { - expect(result.updateResults.get(1)).to.deep.equal({ - matchedCount: 1, - modifiedCount: 1, - didUpsert: false + it('merges the modified count', function () { + expect(result.modifiedCount).to.equal(1); }); - }); - it('sets the upsert results', function () { - expect(result.updateResults.get(2)).to.deep.equal({ - matchedCount: 0, - modifiedCount: 0, - upsertedId: 1, - didUpsert: true + it('sets the update results', function () { + expect(result.updateResults.get(1)).to.deep.equal({ + matchedCount: 1, + modifiedCount: 1, + didUpsert: false + }); + }); + + it('sets the upsert results', function () { + expect(result.updateResults.get(2)).to.deep.equal({ + matchedCount: 0, + modifiedCount: 0, + upsertedId: 1, + didUpsert: true + }); }); - }); - it('merges the deleted count', function () { - expect(result.deletedCount).to.equal(1); + it('merges the deleted count', function () { + expect(result.deletedCount).to.equal(1); + }); + + it('sets the delete results', function () { + expect(result.deleteResults.get(3).deletedCount).to.equal(1); + }); }); + }); + + context('when not requesting verbose results', function () { + // An example verbose response from the server without errors: + // { + // cursor: { + // id: Long('0'), + // firstBatch: [], + // ns: 'admin.$cmd.bulkWrite' + // }, + // nErrors: 0, + // nInserted: 2, + // nMatched: 0, + // nModified: 0, + // nUpserted: 0, + // nDeleted: 0, + // ok: 1 + // } + context('when there are no errors', function () { + const operations = [ + { insert: 0, document: { _id: 1 } }, + { update: 0 }, + { update: 0 }, + { delete: 0 } + ]; + const documents = []; + const serverResponse = { + cursor: { + id: new Long('0'), + firstBatch: documents, + ns: 'admin.$cmd.bulkWrite' + }, + nErrors: 0, + nInserted: 1, + nMatched: 1, + nModified: 1, + nUpserted: 1, + nDeleted: 1, + ok: 1 + }; + const response = new ClientBulkWriteCursorResponse(BSON.serialize(serverResponse), 0); + const merger = new ClientBulkWriteResultsMerger({ verboseResults: false }); + let result: ClientBulkWriteResult; + + before(function () { + result = merger.merge(0, operations, response, documents); + }); - it('sets the delete results', function () { - expect(result.deleteResults.get(3).deletedCount).to.equal(1); + it('merges the inserted count', function () { + expect(result.insertedCount).to.equal(1); + }); + + it('sets no insert results', function () { + 
expect(result.insertResults).to.equal(undefined); + }); + + it('merges the upserted count', function () { + expect(result.upsertedCount).to.equal(1); + }); + + it('merges the matched count', function () { + expect(result.matchedCount).to.equal(1); + }); + + it('merges the modified count', function () { + expect(result.modifiedCount).to.equal(1); + }); + + it('sets no update results', function () { + expect(result.updateResults).to.equal(undefined); + }); + + it('merges the deleted count', function () { + expect(result.deletedCount).to.equal(1); + }); + + it('sets no delete results', function () { + expect(result.deleteResults).to.equal(undefined); + }); }); }); }); - context('when not requesting verbose results', function () { - // An example verbose response from the server without errors: - // { - // cursor: { - // id: Long('0'), - // firstBatch: [], - // ns: 'admin.$cmd.bulkWrite' - // }, - // nErrors: 0, - // nInserted: 2, - // nMatched: 0, - // nModified: 0, - // nUpserted: 0, - // nDeleted: 0, - // ok: 1 - // } - context('when there are no errors', function () { - const operations = [ - { insert: 0, document: { _id: 1 } }, - { update: 0 }, - { update: 0 }, - { delete: 0 } - ]; - const documents = []; - const serverResponse = { - cursor: { - id: new Long('0'), - firstBatch: documents, - ns: 'admin.$cmd.bulkWrite' - }, - nErrors: 0, - nInserted: 1, - nMatched: 1, - nModified: 1, - nUpserted: 1, - nDeleted: 1, - ok: 1 - }; - const response = new ClientBulkWriteCursorResponse(BSON.serialize(serverResponse), 0); - const merger = new ClientBulkWriteResultsMerger({ verboseResults: false }); - let result: ClientBulkWriteResult; - - before(function () { - result = merger.merge(operations, response, documents); - }); + context('when merging on a later batch', function () { + context('when requesting verbose results', function () { + // An example verbose response from the server without errors: + // { + // cursor: { + // id: Long('0'), + // firstBatch: [ { ok: 1, idx: 0, n: 1 }, { ok: 1, idx: 1, n: 1 } ], + // ns: 'admin.$cmd.bulkWrite' + // }, + // nErrors: 0, + // nInserted: 2, + // nMatched: 0, + // nModified: 0, + // nUpserted: 0, + // nDeleted: 0, + // ok: 1 + // } + context('when there are no errors', function () { + const operations = [ + { insert: 0, document: { _id: 1 } }, + { update: 0 }, + { update: 0 }, + { delete: 0 } + ]; + const documents = [ + { ok: 1, idx: 0, n: 1 }, // Insert + { ok: 1, idx: 1, n: 1, nModified: 1 }, // Update match + { ok: 1, idx: 2, n: 0, upserted: { _id: 1 } }, // Update no match with upsert + { ok: 1, idx: 3, n: 1 } // Delete + ]; + const serverResponse = { + cursor: { + id: new Long('0'), + firstBatch: documents, + ns: 'admin.$cmd.bulkWrite' + }, + nErrors: 0, + nInserted: 1, + nMatched: 1, + nModified: 1, + nUpserted: 1, + nDeleted: 1, + ok: 1 + }; + const response = new ClientBulkWriteCursorResponse(BSON.serialize(serverResponse), 0); + const merger = new ClientBulkWriteResultsMerger({ verboseResults: true }); + let result: ClientBulkWriteResult; - it('merges the inserted count', function () { - expect(result.insertedCount).to.equal(1); - }); + before(function () { + result = merger.merge(20, operations, response, documents); + }); - it('sets no insert results', function () { - expect(result.insertResults).to.equal(undefined); - }); + it('merges the inserted count', function () { + expect(result.insertedCount).to.equal(1); + }); - it('merges the upserted count', function () { - expect(result.upsertedCount).to.equal(1); - }); + it('sets insert results', 
function () { + expect(result.insertResults.get(20).insertedId).to.equal(1); + }); - it('merges the matched count', function () { - expect(result.matchedCount).to.equal(1); - }); + it('merges the upserted count', function () { + expect(result.upsertedCount).to.equal(1); + }); - it('merges the modified count', function () { - expect(result.modifiedCount).to.equal(1); - }); + it('merges the matched count', function () { + expect(result.matchedCount).to.equal(1); + }); - it('sets no update results', function () { - expect(result.updateResults).to.equal(undefined); - }); + it('merges the modified count', function () { + expect(result.modifiedCount).to.equal(1); + }); - it('merges the deleted count', function () { - expect(result.deletedCount).to.equal(1); - }); + it('sets the update results', function () { + expect(result.updateResults.get(21)).to.deep.equal({ + matchedCount: 1, + modifiedCount: 1, + didUpsert: false + }); + }); + + it('sets the upsert results', function () { + expect(result.updateResults.get(22)).to.deep.equal({ + matchedCount: 0, + modifiedCount: 0, + upsertedId: 1, + didUpsert: true + }); + }); - it('sets no delete results', function () { - expect(result.deleteResults).to.equal(undefined); + it('merges the deleted count', function () { + expect(result.deletedCount).to.equal(1); + }); + + it('sets the delete results', function () { + expect(result.deleteResults.get(23).deletedCount).to.equal(1); + }); }); }); });
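The later-batch cases above expect per-operation results to be keyed by the operation's position in the overall bulk write rather than within a single batch: with a starting offset of 20, the server documents at idx 0 through 3 land at keys 20 through 23. A minimal sketch of that re-keying under assumed shapes (`BatchResultDocument` and `mergeVerboseResults` are illustrative names, not the driver's `ClientBulkWriteResultsMerger`):

```ts
// Assumed subset of one document from the bulkWrite cursor batch.
interface BatchResultDocument {
  ok: number;
  idx: number; // index of the operation within this batch
  n: number;
  nModified?: number;
  upserted?: { _id: unknown };
}

// Accumulates per-operation insert results across batches, re-keying each
// document by its global position: batchOffset + idx.
function mergeVerboseResults(
  batchOffset: number,
  operations: Record<string, unknown>[],
  documents: BatchResultDocument[],
  insertResults: Map<number, { insertedId: unknown }>
): void {
  for (const doc of documents) {
    const globalIndex = batchOffset + doc.idx;
    const op = operations[doc.idx];
    if (doc.ok === 1 && 'insert' in op) {
      insertResults.set(globalIndex, {
        insertedId: (op.document as { _id: unknown })._id
      });
    }
    // update and delete results would be re-keyed the same way
  }
}

// With batchOffset = 20 the insert at idx 0 lands at key 20, matching
// `result.insertResults.get(20)` in the test above.
const insertResults = new Map<number, { insertedId: unknown }>();
mergeVerboseResults(
  20,
  [{ insert: 0, document: { _id: 1 } }, { update: 0 }, { update: 0 }, { delete: 0 }],
  [{ ok: 1, idx: 0, n: 1 }],
  insertResults
);
console.log(insertResults.get(20)); // { insertedId: 1 }
```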
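The `#buildBatch` tests earlier in this patch drive batch splitting from two directions: `buildBatch(48000000, 1)` caps the number of operations per command, while `buildBatch(1090, 100000)` caps the serialized message size, and each tight limit yields two single-operation commands. A generic sketch of that kind of two-limit batching, assuming a hypothetical `splitIntoBatches` helper and a BSON size estimate rather than the driver's command builder:

```ts
import { calculateObjectSize, type Document } from 'bson';

// Split operations into batches that respect both a maximum operation count
// and a maximum total size in bytes, i.e. the two limits that the
// buildBatch(maxMessageSizeBytes, maxWriteBatchSize) tests exercise.
function splitIntoBatches(operations: Document[], maxBytes: number, maxCount: number): Document[][] {
  const batches: Document[][] = [];
  let current: Document[] = [];
  let currentBytes = 0;

  for (const op of operations) {
    const opBytes = calculateObjectSize(op);
    const overflows =
      current.length >= maxCount || (current.length > 0 && currentBytes + opBytes > maxBytes);
    if (overflows) {
      batches.push(current);
      current = [];
      currentBytes = 0;
    }
    current.push(op);
    currentBytes += opBytes;
  }
  if (current.length > 0) batches.push(current);
  return batches;
}

// Two small inserts with a one-operation cap end up in separate batches,
// analogous to the 'when exceeding the max batch size' case above.
const ops: Document[] = [
  { insert: 0, document: { _id: 1, name: 1 } },
  { insert: 0, document: { _id: 2, name: 2 } }
];
console.log(splitIntoBatches(ops, 48000000, 1).length); // 2
```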