From 3c2ec0a59fbe70b3c20bb1a23fc0fe528465e5a9 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 11 Apr 2024 17:15:41 -0400 Subject: [PATCH 001/136] feat(NODE-6090): Implement CSOT logic for connection checkout and server selection --- src/admin.ts | 3 +- src/cmap/connection.ts | 4 + src/cmap/connection_pool.ts | 53 ++- src/collection.ts | 5 + src/db.ts | 6 + src/error.ts | 9 + src/index.ts | 1 + src/operations/command.ts | 2 + src/operations/find.ts | 3 +- src/operations/operation.ts | 8 + src/operations/run_command.ts | 9 +- src/sdam/server.ts | 3 +- src/sdam/topology.ts | 54 ++- src/timeout.ts | 14 + src/utils.ts | 10 + ...ient_side_operations_timeout.prose.test.ts | 315 +++++++++++++----- ...lient_side_operations_timeout.unit.test.ts | 140 +++++--- .../node_csot.test.ts | 75 ++++- test/unit/cmap/connection_pool.test.js | 33 +- test/unit/index.test.ts | 1 + 20 files changed, 570 insertions(+), 178 deletions(-) diff --git a/src/admin.ts b/src/admin.ts index a71ac4be1dc..e030384eafc 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -78,7 +78,8 @@ export class Admin { new RunAdminCommandOperation(command, { ...resolveBSONOptions(options), session: options?.session, - readPreference: options?.readPreference + readPreference: options?.readPreference, + timeoutMS: options?.timeoutMS ?? this.s.db.timeoutMS }) ); } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 666e92fb8c2..202be1e48af 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,6 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; +import { type Timeout } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -94,6 +95,9 @@ export interface CommandOptions extends BSONSerializeOptions { writeConcern?: WriteConcern; directConnection?: boolean; + + /** @internal */ + timeout?: Timeout; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5a858a5121e..79440db1e06 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -21,13 +21,14 @@ import { MongoInvalidArgumentError, MongoMissingCredentialsError, MongoNetworkError, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; +import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -102,7 +103,6 @@ export interface ConnectionPoolOptions extends Omit void; reject: (err: AnyError) => void; - timeout: Timeout; [kCancelled]?: boolean; checkoutTime: number; } @@ -355,37 +355,57 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(): Promise { - const checkoutTime = now(); + async checkOut(options?: { timeout?: Timeout }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; + const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; const { promise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(waitQueueTimeoutMS); + let timeout: Timeout | null = null; + if (options?.timeout) { + // CSOT enabled + // Determine if we're using the timeout passed in or a new timeout + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + // This check determines whether or not Topology.selectServer used the configured + // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + // server selection used `timeoutMS`, so we should use the existing timeout as the timeout + // here + timeout = options.timeout; + } else { + // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with + // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut + // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking + timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); + } + } + } else { + timeout = Timeout.expires(waitQueueTimeoutMS); + } const waitQueueMember: WaitQueueMember = { resolve, - reject, - timeout, - checkoutTime + reject }; this[kWaitQueue].push(waitQueueMember); process.nextTick(() => this.processWaitQueue()); try { - return await Promise.race([promise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { waitQueueMember[kCancelled] = true; - waitQueueMember.timeout.clear(); - this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime) @@ -396,9 +416,16 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); + if (options?.timeout) { + throw new MongoOperationTimeoutError('Timed out during connection checkout', { + cause: timeoutError + }); + } throw timeoutError; } throw error; + } finally { + if (timeout !== options?.timeout) timeout?.clear(); } } @@ -764,7 +791,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.reject(error); continue; @@ -785,7 +811,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECKED_OUT, new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.resolve(connection); @@ -828,8 +853,6 @@ export class ConnectionPool extends TypedEventEmitter { ); waitQueueMember.resolve(connection); } - - waitQueueMember.timeout.clear(); } process.nextTick(() => this.processWaitQueue()); }); diff --git a/src/collection.ts b/src/collection.ts index ccc6fe2da65..dbd91371cce 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -262,6 +262,11 @@ export class Collection { this.s.collectionHint = normalizeHintField(v); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options.timeoutMS; + } + /** * Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field, * one will be added to each of the documents missing it by the driver, mutating the document. This behavior diff --git a/src/db.ts b/src/db.ts index 0dcd24dbb1d..538c8d2c9b4 100644 --- a/src/db.ts +++ b/src/db.ts @@ -222,6 +222,11 @@ export class Db { return this.s.namespace.toString(); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options?.timeoutMS; + } + /** * Create a new collection on a server with the specified options. Use this to create capped collections. 
* More information about command options available at https://www.mongodb.com/docs/manual/reference/command/create/ @@ -272,6 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/error.ts b/src/error.ts index 668e9cdbf52..0620d3069f3 100644 --- a/src/error.ts +++ b/src/error.ts @@ -761,6 +761,15 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } } +/** + * @internal + */ +export class MongoOperationTimeoutError extends MongoRuntimeError { + override get name(): string { + return 'MongoOperationTimeoutError'; + } +} + /** * An error thrown when the user attempts to add options to a cursor that has already been * initialized diff --git a/src/index.ts b/src/index.ts index eeebbd1154c..0134027a5f1 100644 --- a/src/index.ts +++ b/src/index.ts @@ -63,6 +63,7 @@ export { MongoNetworkTimeoutError, MongoNotConnectedError, MongoOIDCError, + MongoOperationTimeoutError, MongoParseError, MongoRuntimeError, MongoServerClosedError, diff --git a/src/operations/command.ts b/src/operations/command.ts index 94ccc6ceafe..c64b4ae963a 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -65,6 +65,7 @@ export interface OperationParent { writeConcern?: WriteConcern; readPreference?: ReadPreference; bsonOptions?: BSONSerializeOptions; + timeoutMS?: number; } /** @internal */ @@ -131,6 +132,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, + timeout: this.timeout, readPreference: this.readPreference, session }; diff --git a/src/operations/find.ts b/src/operations/find.ts index a040af73bc6..0f81f2d61f2 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -116,7 +116,8 @@ export class FindOperation extends CommandOperation { ...this.options, ...this.bsonOptions, documentsReturnedIn: 'firstBatch', - session + session, + timeout: this.timeout }, this.explain ? ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/operation.ts b/src/operations/operation.ts index b51cca40201..0599b72b96d 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,6 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type Timeout } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -56,6 +57,11 @@ export abstract class AbstractOperation { options: OperationOptions; + /** @internal */ + timeout?: Timeout; + /** @internal */ + timeoutMS?: number; + [kSession]: ClientSession | undefined; static aspects?: Set; @@ -73,6 +79,8 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; + + this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
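A minimal sketch of how the fields added to AbstractOperation above are intended to be consumed, assuming executeOperation-style wiring that is not part of this patch: the operation's timeoutMS becomes a single Timeout, which is then threaded down as the `timeout` command option so that server selection, connection checkout and the command itself share one deadline. The helper name below is hypothetical; only Timeout.expires, the timeoutMS/timeout fields and the `timeout` command option come from this change.

import { Timeout } from '../timeout';

// Hypothetical helper: derive one Timeout per operation from its timeoutMS.
function startOperationTimeout(operation: { timeoutMS?: number; timeout?: Timeout }): void {
  if (operation.timeoutMS != null) {
    // Timeout treats a duration of 0 as "no deadline" (remainingTime reports Infinity).
    operation.timeout = Timeout.expires(operation.timeoutMS);
  }
}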
diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index ad7d02c044f..56462fa8843 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -14,6 +14,8 @@ export type RunCommandOptions = { session?: ClientSession; /** The read preference */ readPreference?: ReadPreferenceLike; + /** @internal */ + timeoutMS?: number; } & BSONSerializeOptions; /** @internal */ @@ -39,10 +41,12 @@ export class RunCommandOperation extends AbstractOperation { { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }, this.options.responseType ); + return res; } } @@ -68,7 +72,8 @@ export class RunAdminCommandOperation extends AbstractOperation const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }); return res; } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index b4450f00727..a1b885382ec 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,7 +311,7 @@ export class Server extends TypedEventEmitter { this.incrementOperationCount(); if (conn == null) { try { - conn = await this.pool.checkOut(); + conn = await this.pool.checkOut(options); if (this.loadBalanced && isPinnableCommand(cmd, session)) { session?.pin(conn); } @@ -336,6 +336,7 @@ export class Server extends TypedEventEmitter { operationError.code === MONGODB_ERROR_CODES.Reauthenticate ) { await this.pool.reauthenticate(conn); + // TODO(NODE-5682): Implement CSOT support for socket read/write at the connection layer try { const res = await conn.command(ns, cmd, finalOptions, responseType); throwIfWriteConcernError(res); diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 73b0e92a09a..4c9d71d807d 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -24,6 +24,7 @@ import { type MongoDriverError, MongoError, MongoErrorLabel, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerSelectionError, MongoTopologyClosedError @@ -37,6 +38,7 @@ import { Timeout, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, + csotMin, type EventEmitterWithState, HostAddress, List, @@ -107,7 +109,6 @@ export interface ServerSelectionRequest { resolve: (server: Server) => void; reject: (error: MongoError) => void; [kCancelled]?: boolean; - timeout: Timeout; operationName: string; waitingLogged: boolean; previousServer?: ServerDescription; @@ -457,8 +458,14 @@ export class Topology extends TypedEventEmitter { } } + const timeoutMS = this.client.options.timeoutMS; + const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const selectServerOptions = { operationName: 'ping', ...options }; + const selectServerOptions = { + operationName: 'ping', + timeout, + ...options + }; try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), @@ -467,7 +474,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, {}); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -556,6 +563,25 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } + const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 0; + let timeout: Timeout | null; + if (options.timeout) { + // CSOT Enabled + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + timeout = options.timeout; + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } + } else { + timeout = null; + } + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } const isSharded = this.description.type === TopologyType.Sharded; const session = options.session; @@ -578,11 +604,12 @@ export class Topology extends TypedEventEmitter { ) ); } + if (timeout !== options.timeout) timeout?.clear(); return transaction.server; } const { promise: serverPromise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); + const waitQueueMember: ServerSelectionRequest = { serverSelector, topologyDescription: this.description, @@ -590,7 +617,6 @@ export class Topology extends TypedEventEmitter { transaction, resolve, reject, - timeout, startTime: now(), operationName: options.operationName, waitingLogged: false, @@ -601,14 +627,14 @@ export class Topology extends TypedEventEmitter { processWaitQueue(this); try { - return await Promise.race([serverPromise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([serverPromise, timeout]) : serverPromise); } catch (error) { if (TimeoutError.is(error)) { // Timeout waitQueueMember[kCancelled] = true; - timeout.clear(); const timeoutError = new MongoServerSelectionError( - `Server selection timed out after ${options.serverSelectionTimeoutMS} ms`, + `Server selection timed out after ${timeout?.duration} ms`, this.description ); if ( @@ -628,10 +654,17 @@ export class Topology extends TypedEventEmitter { ); } + if (options.timeout) { + throw new MongoOperationTimeoutError('Timed out during server selection', { + cause: timeoutError + }); + } throw timeoutError; } // Other server selection error throw error; + } finally { + if (timeout !== options.timeout) timeout?.clear(); } } /** @@ -889,8 +922,6 @@ function drainWaitQueue(queue: List, drainError: MongoDr continue; } - waitQueueMember.timeout.clear(); - if (!waitQueueMember[kCancelled]) { if ( waitQueueMember.mongoLogger?.willLog( @@ -944,7 +975,6 @@ function processWaitQueue(topology: Topology) { ) : serverDescriptions; } catch (selectorError) { - waitQueueMember.timeout.clear(); if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, @@ -1032,8 +1062,6 @@ function processWaitQueue(topology: Topology) { transaction.pinServer(selectedServer); } - waitQueueMember.timeout.clear(); - if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, diff --git a/src/timeout.ts b/src/timeout.ts index cd48ec385da..7af1a23f261 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -40,6 +40,16 @@ export class Timeout extends Promise { public duration: number; public timedOut = false; + get remainingTime(): number { + if (this.timedOut) return 0; + if (this.duration === 0) return Infinity; + return this.start + this.duration - Math.trunc(performance.now()); + } + + get timeElapsed(): number { + return Math.trunc(performance.now()) - this.start; + } + /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = false) { let reject!: Reject; @@ -78,6 +88,10 @@ export class Timeout extends Promise { this.id = undefined; } + throwIfExpired(): void { + if (this.timedOut) throw new TimeoutError('Timed out'); + } + public static expires(durationMS: number, unref?: boolean): Timeout { return new Timeout(undefined, durationMS, unref); } diff --git a/src/utils.ts b/src/utils.ts index 07fe8e56a66..fa7a3f6509c 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -533,6 +533,10 @@ export function resolveOptions( result.readPreference = readPreference; } + const timeoutMS = options?.timeoutMS; + + result.timeoutMS = timeoutMS ?? 
parent?.timeoutMS; + return result; } @@ -1368,6 +1372,12 @@ export async function fileIsAccessible(fileName: string, mode?: number) { } } +export function csotMin(duration1: number, duration2: number): number { + if (duration1 === 0) return duration2; + if (duration2 === 0) return duration1; + return Math.min(duration1, duration2); +} + export function noop() { return; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1ed88f34d86..903ea9c3bb4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,8 +1,30 @@ /* Specification prose tests */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { + MongoClient, + MongoOperationTimeoutError, + MongoServerSelectionError, + now +} from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec prose tests', () => { - context('1. Multi-batch writes', () => { +describe('CSOT spec prose tests', function () { + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + }); + + afterEach(async function () { + await internalClient?.close(); + await client?.close(); + }); + + context.skip('1. Multi-batch writes', () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -31,7 +53,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('2. maxTimeMS is not set for commands sent to mongocryptd', () => { + context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { /** * This test MUST only be run against enterprise server versions 4.2 and higher. * @@ -42,7 +64,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('3. ClientEncryption', () => { + context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, * `LOCAL_MASTERKEY` refers to the following base64: @@ -132,7 +154,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('4. Background Connection Pooling', () => { + context.skip('4. Background Connection Pooling', () => { /** * The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication * fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait @@ -192,7 +214,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('5. Blocking Iteration Methods', () => { + context.skip('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -251,7 +273,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('6. GridFS - Upload', () => { + context.skip('6. GridFS - Upload', () => { /** Tests in this section MUST only be run against server versions 4.4 and higher. 
*/ context('uploads via openUploadStream can be timed out', () => { @@ -306,7 +328,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('7. GridFS - Download', () => { + context.skip('7. GridFS - Download', () => { /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -351,96 +373,225 @@ describe.skip('CSOT spec prose tests', () => { }); context('8. Server Selection', () => { - context('serverSelectionTimeoutMS honored if timeoutMS is not set', () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. - * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. - */ - }); + context('using sinon timer', function () { + let clock: sinon.SinonFakeTimers; + + beforeEach(function () { + clock = sinon.useFakeTimers(); + }); + + afterEach(function () { + clock.restore(); + }); - context( - "timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", - () => { + it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. + * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ - } - ); - context( - "serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", - () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. + /** NOTE: This is the original implementation of this test, but it was flaky, so was + * replaced by the current implementation using sinon fake timers + * ```ts + * client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + * const admin = client.db('test').admin(); + * const start = performance.now(); + * const maybeError = await admin.ping().then( + * () => null, + * e => e + * ); + * const end = performance.now(); + * + * expect(maybeError).to.be.instanceof(MongoServerSelectionError); + * expect(end - start).to.be.lte(15) + * ``` */ - } - ); + client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + const admin = client.db('test').admin(); + const maybeError = admin.ping().then( + () => null, + e => e + ); + + await clock.tickAsync(11); + expect(await maybeError).to.be.instanceof(MongoServerSelectionError); + }); + }); + + it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. 
+ */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); + + it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); - context('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', () => { + it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ + client = new MongoClient('mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); }); - context( - "timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. 
+ * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); - context( - "serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 20, + timeoutMS: 10 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + it.skip("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); + + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 10, + timeoutMS: 20 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context('9. endSession', () => { + context.skip('9. endSession', () => { /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -472,7 +623,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('10. 
Convenient Transactions', () => { + context.skip('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index cf9c5f736ff..c1426d8db1d 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -1,51 +1,105 @@ -/* eslint-disable @typescript-eslint/no-empty-function */ /** * The following tests are described in CSOTs spec prose tests as "unit" tests * The tests enumerated in this section could not be expressed in either spec or prose format. * Drivers SHOULD implement these if it is possible to do so using the driver's existing test infrastructure. */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec unit tests', () => { - context('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', () => {}); - - context( - 'If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', - () => {} - ); - - context( - 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', - () => {} - ); - - context( - 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - - context( - 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', - () => {} - ); +describe('CSOT spec unit tests', function () { + let client: MongoClient; + + afterEach(async function () { + sinon.restore(); + await client?.close(); + }); + + context('Server Selection and Connection Checkout', function () { + it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); + sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(Timeout.expires).to.have.been.calledWith(10000); + expect(Timeout.expires).to.not.have.been.calledWith(999999); + }); + + it('If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ timeoutMS: 
1000 }); + // Spy on connection checkout and pull options argument + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(checkoutSpy).to.have.been.calledOnce; + expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + // Check that we passed through the timeout + expect(checkoutSpy.firstCall.args[0].timeout).to.equal( + selectServerSpy.lastCall.lastArg.timeout + ); + + // Check that no more Timeouts are constructed after we enter checkout + expect(!expiresSpy.calledAfter(checkoutSpy)); + }); + + it('If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 123456 }); + + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + expect(checkoutSpy).to.have.been.calledAfter(selectServerSpy); + + expect(expiresSpy).to.have.been.calledWith(123456); + }); + + /* eslint-disable @typescript-eslint/no-empty-function */ + context.skip( + 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', + () => {} + ).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + context( + 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', + () => {} + ); + }); + + context.skip('Socket timeouts', function () { + context( + 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', + () => {} + ); + }).skipReason = + 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; + + context.skip('Client side encryption', function () { + context( + 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', + () => {} + ); + + context( + 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', + () => {} + ); + }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + + context.skip('Background Connection Pooling', function () { + context( + 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', + () => {} + ); + }).skipReason = 'TODO(NODE-6091): Implement CSOT logic for Background Connection Pooling'; + /* eslint-enable @typescript-eslint/no-empty-function */ }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b6a936afbb9..5636eb00db7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -7,7 +7,9 @@ import { type Collection, type Db, type FindCursor, - type MongoClient + LEGACY_HELLO_COMMAND, + type MongoClient, + MongoOperationTimeoutError } from '../../mongodb'; describe('CSOT driver tests', () => { @@ -94,4 +96,75 @@ 
describe('CSOT driver tests', () => { }); }); }); + + describe('autoconnect', () => { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + client = undefined; + }); + + describe('when failing autoconnect with timeoutMS defined', () => { + let configClient: MongoClient; + + beforeEach(async function () { + configClient = this.configuration.newClient(); + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + }); + + afterEach(async function () { + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + await configClient.close(); + }); + + it('throws a MongoOperationTimeoutError', { + metadata: { requires: { mongodb: '>=4.4' } }, + test: async function () { + const commandsStarted = []; + client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + + client.on('commandStarted', ev => commandsStarted.push(ev)); + + const maybeError = await client + .db('test') + .collection('test') + .insertOne({ a: 19 }) + .then( + () => null, + e => e + ); + + expect(maybeError).to.exist; + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + + expect(commandsStarted).to.have.length(0); // Ensure that we fail before we start the insertOne + } + }); + }); + }); }); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 69102e1f150..18048befab4 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -5,7 +5,7 @@ const { WaitQueueTimeoutError } = require('../../mongodb'); const mock = require('../../tools/mongodb-mock/index'); const sinon = require('sinon'); const { expect } = require('chai'); -const { setImmediate } = require('timers'); +const { setImmediate } = require('timers/promises'); const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); @@ -26,6 +26,9 @@ describe('Connection Pool', function () { options: { extendedMetadata: {} } + }, + s: { + serverSelectionTimeoutMS: 0 } } }; @@ -98,7 +101,7 @@ describe('Connection Pool', function () { pool.checkIn(conn); }); - it('should clear timed out wait queue members if no connections are available', function (done) { + it('should clear timed out wait queue members if no connections are available', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; if (isHello(doc)) { @@ -114,23 +117,15 @@ describe('Connection Pool', function () { pool.ready(); - pool.checkOut().then(conn => { - expect(conn).to.exist; - pool.checkOut().then(expect.fail, err => { - expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); - - // We can only process the wait queue with `checkIn` and `checkOut`, so we - // force the pool here to think there are no available connections, even though - // we are checking the connection back in. 
This simulates a slow leak where - // incoming requests outpace the ability of the queue to fully process cancelled - // wait queue members - sinon.stub(pool, 'availableConnectionCount').get(() => 0); - pool.checkIn(conn); - - setImmediate(() => expect(pool).property('waitQueueSize').to.equal(0)); - done(); - }); - }, expect.fail); + const conn = await pool.checkOut(); + const err = await pool.checkOut().catch(e => e); + expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); + sinon.stub(pool, 'availableConnectionCount').get(() => 0); + pool.checkIn(conn); + + await setImmediate(); + + expect(pool).property('waitQueueSize').to.equal(0); }); describe('minPoolSize population', function () { diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index 980747c8c70..2766c717712 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -107,6 +107,7 @@ const EXPECTED_EXPORTS = [ 'MongoTailableCursorError', 'MongoTopologyClosedError', 'MongoTransactionError', + 'MongoOperationTimeoutError', 'MongoUnexpectedServerResponseError', 'MongoWriteConcernError', 'WriteConcernErrorResult', From 909578fcf0b50753b9c43ca3826099dfc6643030 Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 10 Jun 2024 10:46:02 -0400 Subject: [PATCH 002/136] test(NODE-6120): Implement Unified test runner changes for CSOT (#4121) --- test/spec/unified-test-format/Makefile | 37 +++++- .../collectionData-createOptions.yml | 7 +- .../valid-pass/createEntities-operation.json | 74 ++++++++++++ .../valid-pass/createEntities-operation.yml | 38 ++++++ .../valid-pass/entity-cursor-iterateOnce.json | 111 ++++++++++++++++++ .../valid-pass/entity-cursor-iterateOnce.yml | 59 ++++++++++ .../valid-pass/entity-find-cursor.json | 15 ++- .../valid-pass/entity-find-cursor.yml | 6 +- ...ectedEventsForClient-ignoreExtraEvents.yml | 2 +- .../valid-pass/matches-lte-operator.json | 78 ++++++++++++ .../valid-pass/matches-lte-operator.yml | 41 +++++++ .../valid-pass/poc-change-streams.json | 36 ++++++ .../valid-pass/poc-change-streams.yml | 18 +++ .../valid-pass/poc-crud.json | 2 +- .../valid-pass/poc-crud.yml | 2 +- .../valid-pass/poc-sessions.json | 2 +- .../valid-pass/poc-sessions.yml | 3 +- .../poc-transactions-convenient-api.json | 2 +- .../poc-transactions-convenient-api.yml | 2 +- .../poc-transactions-mongos-pin-auto.json | 2 +- .../poc-transactions-mongos-pin-auto.yml | 2 +- .../valid-pass/poc-transactions.json | 6 +- .../valid-pass/poc-transactions.yml | 6 +- test/tools/unified-spec-runner/match.ts | 32 ++++- test/tools/unified-spec-runner/schema.ts | 1 + 25 files changed, 547 insertions(+), 37 deletions(-) create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.json create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.yml create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.json create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.yml diff --git a/test/spec/unified-test-format/Makefile b/test/spec/unified-test-format/Makefile index 9711d9eee0e..a2b79e3f70b 100644 --- a/test/spec/unified-test-format/Makefile +++ b/test/spec/unified-test-format/Makefile @@ -1,8 +1,8 @@ -SCHEMA=../schema-1.5.json +SCHEMA=../schema-1.21.json -.PHONY: all invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud 
collection-management sessions command-monitoring HAS_AJV +.PHONY: all invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout HAS_AJV -all: invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud collection-management sessions command-monitoring +all: invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api change-streams crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout client-side-encryption invalid: HAS_AJV @# Redirect stdout to hide expected validation errors @@ -14,6 +14,9 @@ valid-fail: HAS_AJV valid-pass: HAS_AJV @ajv test -s $(SCHEMA) -d "valid-pass/*.yml" --valid +atlas-data-lake: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../atlas-data-lake-testing/tests/unified/*.yml" --valid + versioned-api: HAS_AJV @ajv test -s $(SCHEMA) -d "../../versioned-api/tests/*.yml" --valid @@ -26,17 +29,39 @@ gridfs: HAS_AJV transactions: HAS_AJV @ajv test -s $(SCHEMA) -d "../../transactions/tests/unified/*.yml" --valid +transactions-convenient-api: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../transactions-convenient-api/tests/unified/*.yml" --valid + +change-streams: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../change-streams/tests/unified/*.yml" --valid + +client-side-operations-timeout: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-operations-timeout/tests/*.yml" --valid + crud: HAS_AJV @ajv test -s $(SCHEMA) -d "../../crud/tests/unified/*.yml" --valid collection-management: HAS_AJV @ajv test -s $(SCHEMA) -d "../../collection-management/tests/*.yml" --valid +read-write-concern: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../read-write-concern/tests/operation/*.yml" --valid + +retryable-reads: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-reads/tests/unified/*.yml" --valid + +retryable-writes: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-writes/tests/unified/*.yml" --valid + sessions: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../sessions/tests/unified/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../sessions/tests/*.yml" --valid + +command-logging-and-monitoring: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/logging/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/monitoring/*.yml" --valid -command-monitoring: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../command-monitoring/tests/unified/*.yml" --valid +client-side-encryption: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-encryption/tests/unified/*.yml" --valid HAS_AJV: @if ! command -v ajv > /dev/null; then \ diff --git a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml index 3b1c0c3a412..c6afedcfa96 100644 --- a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml +++ b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml @@ -1,12 +1,9 @@ description: collectionData-createOptions - schemaVersion: "1.9" - runOnRequirements: - minServerVersion: "3.6" # Capped collections cannot be created on serverless instances. 
serverless: forbid - createEntities: - client: id: &client0 client0 @@ -18,7 +15,6 @@ createEntities: id: &collection0 collection0 database: *database0 collectionName: &collection0Name coll0 - initialData: - collectionName: *collection0Name databaseName: *database0Name @@ -28,7 +24,6 @@ initialData: size: &cappedSize 4096 documents: - { _id: 1, x: 11 } - tests: - description: collection is created with the correct options operations: @@ -39,4 +34,4 @@ tests: - $collStats: { storageStats: {} } - $project: { capped: '$storageStats.capped', maxSize: '$storageStats.maxSize'} expectResult: - - { capped: true, maxSize: *cappedSize } + - { capped: true, maxSize: *cappedSize } \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.json b/test/spec/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 00000000000..3fde42919d7 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.yml b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml new file mode 100644 index 00000000000..ee8acd73687 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml @@ -0,0 +1,38 @@ +description: createEntities-operation + +# Note: createEntities is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +tests: + - description: createEntities operation + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client1 client1 + observeEvents: [ commandStartedEvent ] + - database: + id: &database1 database1 + client: *client1 + databaseName: &database1Name database1 + - collection: + id: &collection1 collection1 + database: *database1 + collectionName: &collection1Name coll1 + - name: deleteOne + object: *collection1 + arguments: + filter: { _id : 1 } + expectEvents: + - client: *client1 + events: + - commandStartedEvent: + command: + delete: *collection1Name + deletes: + - q: { _id: 1 } + limit: 1 + commandName: delete + databaseName: *database1Name diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 00000000000..b17ae78b942 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,111 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml new file mode 100644 index 00000000000..508e594a538 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml @@ -0,0 +1,59 @@ +description: entity-cursor-iterateOnce + +# Note: iterateOnce is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - _id: 1 + - _id: 2 + - _id: 3 + +tests: + - description: iterateOnce + operations: + - name: createFindCursor + object: *collection0 + arguments: + filter: {} + batchSize: 2 + saveResultAsEntity: &cursor0 cursor0 + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 1 } + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 2 } + # This operation could be iterateUntilDocumentOrError, but we use iterateOne to ensure that drivers support it. + - name: iterateOnce + object: *cursor0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: {} + batchSize: 2 + commandName: find + databaseName: *database0Name + - commandStartedEvent: + command: + getMore: { $$type: [ int, long ] } + collection: *collection0Name + commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f3..6f955d81f4a 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json @@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml index 61c9f8835ac..3ecdf6da1df 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml @@ -61,19 +61,19 @@ tests: - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } firstBatch: { $$type: array } commandName: find - commandStartedEvent: command: - getMore: { $$type: long } + getMore: { $$type: [ int, long ] } collection: *collection0Name commandName: getMore - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } nextBatch: { $$type: array } commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml index 162d0e3c046..d6d87094f64 100644 --- a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml +++ b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml @@ -75,4 +75,4 @@ tests: insert: *collection0Name documents: - *insertDocument4 - commandName: insert + commandName: insert \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.json 
b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 00000000000..4de65c58387 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml new file mode 100644 index 00000000000..4bec571f029 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml @@ -0,0 +1,41 @@ +description: matches-lte-operator + +# Note: $$lte is not technically in the 1.8 schema but was introduced at the same time. +schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0Name + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +tests: + - description: special lte matching operator + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id : 1, y: 1 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection0Name + documents: + # We can make exact assertions here but we use the $$lte operator to ensure drivers support it. 
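+          # A minimal reading of the runner change in this patch (isLteOperator in
+          # test/tools/unified-spec-runner/match.ts): { $$lte: N } asserts that the actual
+          # value is a number and is less than or equal to N.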
+ - { _id: { $$lte: 1 }, y: { $$lte: 2 } } + commandName: insert + databaseName: *database0Name diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.json b/test/spec/unified-test-format/valid-pass/poc-change-streams.json index 4194005eb41..50f0d06f08d 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.json @@ -94,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml index b066cf0b89a..a7daafceb77 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml @@ -59,6 +59,24 @@ initialData: documents: [] tests: + - description: "saveResultAsEntity is optional for createChangeStream" + runOnRequirements: + - minServerVersion: "3.8.0" + topologies: [ replicaset ] + operations: + - name: createChangeStream + object: *client0 + arguments: + pipeline: [] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + aggregate: 1 + commandName: aggregate + databaseName: admin + - description: "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster." 
runOnRequirements: - minServerVersion: "3.8.0" diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.json b/test/spec/unified-test-format/valid-pass/poc-crud.json index 0790d9b789f..94e4ec56829 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.json +++ b/test/spec/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.yml b/test/spec/unified-test-format/valid-pass/poc-crud.yml index b7d05d75afb..5748c0779f8 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.yml +++ b/test/spec/unified-test-format/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: aggregate diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.json b/test/spec/unified-test-format/valid-pass/poc-sessions.json index 75f34894286..117c9e7d009 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.json +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.yml b/test/spec/unified-test-format/valid-pass/poc-sessions.yml index cb16657da3f..20902583286 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.yml @@ -124,12 +124,11 @@ tests: - description: "Dirty explicit session is discarded" # Original test specified retryWrites=true, but that is now the default. - # Retryable writes will require a sharded-replicaset, though. 
runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed659276..9ab44a9c548 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml index 4f981d15dd4..94fadda0aa5 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a5..de08edec442 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml index 47db7c3188a..33cd2a25214 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.json b/test/spec/unified-test-format/valid-pass/poc-transactions.json index 0355ca20605..2055a3b7057 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.yml b/test/spec/unified-test-format/valid-pass/poc-transactions.yml index 0a66b9bd7f6..8a12c8b39ac 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: @@ -51,7 +51,7 @@ tests: - description: "explicitly create 
collection using create command" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 @@ -109,7 +109,7 @@ tests: - description: "create index on a non-existing collection" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 3a4d4e5e3da..32f6870d998 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -24,6 +24,7 @@ import { Long, MongoBulkWriteError, MongoError, + MongoOperationTimeoutError, MongoServerError, ObjectId, type OneOrMore, @@ -97,6 +98,19 @@ export function isMatchAsRootOperator(value: unknown): value is MatchAsRootOpera return typeof value === 'object' && value != null && '$$matchAsRoot' in value; } +export interface LteOperator { + $$lte: number; +} + +export function isLteOperator(value: unknown): value is LteOperator { + return ( + typeof value === 'object' && + value != null && + '$$lte' in value && + typeof value['$$lte'] === 'number' + ); +} + export const SpecialOperatorKeys = [ '$$exists', '$$type', @@ -105,7 +119,8 @@ export const SpecialOperatorKeys = [ '$$matchAsRoot', '$$matchAsDocument', '$$unsetOrMatches', - '$$sessionLsid' + '$$sessionLsid', + '$$lte' ]; export type SpecialOperator = @@ -116,7 +131,8 @@ export type SpecialOperator = | UnsetOrMatchesOperator | SessionLsidOperator | MatchAsDocumentOperator - | MatchAsRootOperator; + | MatchAsRootOperator + | LteOperator; type KeysOfUnion = T extends object ? keyof T : never; export type SpecialOperatorKey = KeysOfUnion; @@ -129,7 +145,8 @@ export function isSpecialOperator(value: unknown): value is SpecialOperator { isUnsetOrMatchesOperator(value) || isSessionLsidOperator(value) || isMatchAsRootOperator(value) || - isMatchAsDocumentOperator(value) + isMatchAsDocumentOperator(value) || + isLteOperator(value) ); } @@ -378,6 +395,9 @@ export function specialCheck( ); resultCheck(actual, expected.$$matchAsRoot as any, entities, path, false); + } else if (isLteOperator(expected)) { + expect(typeof actual).to.equal('number'); + expect(actual).to.be.lte(expected.$$lte); } else { expect.fail(`Unknown special operator: ${JSON.stringify(expected)}`); } @@ -747,6 +767,12 @@ export function expectErrorCheck( } } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/schema.ts b/test/tools/unified-spec-runner/schema.ts index 81b81724632..ce722b2e706 100644 --- a/test/tools/unified-spec-runner/schema.ts +++ b/test/tools/unified-spec-runner/schema.ts @@ -386,6 +386,7 @@ export interface StoreEventsAsEntity { } export interface ExpectedError { isError?: true; + isTimeoutError?: boolean; isClientError?: boolean; errorContains?: string; errorCode?: number; From e1017502b1b0d07f8f3c7455cc70bc68eaa785b9 Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 21 Jun 2024 12:06:30 -0400 Subject: [PATCH 003/136] refactor(NODE-6187): refactor to use TimeoutContext 
abstraction (#4131) --- src/bulk/common.ts | 4 + src/cmap/connection.ts | 4 +- src/cmap/connection_pool.ts | 39 +--- src/index.ts | 18 +- src/operations/aggregate.ts | 5 +- src/operations/bulk_write.ts | 11 +- src/operations/command.ts | 8 +- src/operations/count.ts | 9 +- src/operations/create_collection.ts | 18 +- src/operations/delete.ts | 21 +- src/operations/distinct.ts | 9 +- src/operations/drop.ts | 24 ++- src/operations/estimated_document_count.ts | 9 +- src/operations/execute_operation.ts | 16 +- src/operations/find.ts | 6 +- src/operations/find_and_modify.ts | 9 +- src/operations/get_more.ts | 5 +- src/operations/indexes.ts | 22 +- src/operations/insert.ts | 19 +- src/operations/kill_cursors.ts | 12 +- src/operations/list_collections.ts | 5 +- src/operations/list_databases.ts | 11 +- src/operations/operation.ts | 10 +- src/operations/profiling_level.ts | 9 +- src/operations/remove_user.ts | 9 +- src/operations/rename.ts | 9 +- src/operations/run_command.ts | 17 +- src/operations/search_indexes/create.ts | 12 +- src/operations/search_indexes/drop.ts | 9 +- src/operations/search_indexes/update.ts | 9 +- src/operations/set_profiling_level.ts | 6 +- src/operations/stats.ts | 9 +- src/operations/update.ts | 24 ++- src/operations/validate_collection.ts | 9 +- src/sdam/server.ts | 12 +- src/sdam/topology.ts | 55 +++-- src/timeout.ts | 166 +++++++++++++- ...lient_side_operations_timeout.unit.test.ts | 12 +- .../node_csot.test.ts | 2 +- test/tools/cmap_spec_runner.ts | 12 +- test/unit/cmap/connection_pool.test.js | 22 +- test/unit/error.test.ts | 19 +- test/unit/operations/get_more.test.ts | 2 +- test/unit/sdam/topology.test.ts | 76 +++++-- test/unit/timeout.test.ts | 204 +++++++++++++++++- 45 files changed, 796 insertions(+), 202 deletions(-) diff --git a/src/bulk/common.ts b/src/bulk/common.ts index c133a57d227..9eb63382443 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -20,6 +20,7 @@ import { makeUpdateStatement, UpdateOperation, type UpdateStatement } from '../o import type { Server } from '../sdam/server'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { applyRetryableWrites, type Callback, @@ -873,6 +874,9 @@ export interface BulkWriteOptions extends CommandOperationOptions { forceServerObjectId?: boolean; /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). 
*/ let?: Document; + + /** @internal */ + timeoutContext?: TimeoutContext; } const executeCommandsAsync = promisify(executeCommands); diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 202be1e48af..9defa144f3c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,7 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type TimeoutContext } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -97,7 +97,7 @@ export interface CommandOptions extends BSONSerializeOptions { directConnection?: boolean; /** @internal */ - timeout?: Timeout; + timeoutContext?: TimeoutContext; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 79440db1e06..5369cc155aa 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -27,8 +27,8 @@ import { } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; -import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type TimeoutContext, TimeoutError } from '../timeout'; +import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -355,41 +355,15 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(options?: { timeout?: Timeout }): Promise { + async checkOut(options: { timeoutContext: TimeoutContext }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); - const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; - const { promise, resolve, reject } = promiseWithResolvers(); - let timeout: Timeout | null = null; - if (options?.timeout) { - // CSOT enabled - // Determine if we're using the timeout passed in or a new timeout - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - // This check determines whether or not Topology.selectServer used the configured - // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - // server selection used `timeoutMS`, so we should use the existing timeout as the timeout - // here - timeout = options.timeout; - } else { - // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with - // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut - // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking - timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); - } - } - } else { - timeout = Timeout.expires(waitQueueTimeoutMS); - } + const timeout = options.timeoutContext.connectionCheckoutTimeout; const waitQueueMember: WaitQueueMember = { resolve, @@ -404,6 +378,7 @@ export class ConnectionPool extends TypedEventEmitter { return await (timeout ? Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { + timeout?.clear(); waitQueueMember[kCancelled] = true; this.emitAndLog( @@ -416,7 +391,7 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); - if (options?.timeout) { + if (options.timeoutContext.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during connection checkout', { cause: timeoutError }); @@ -425,7 +400,7 @@ export class ConnectionPool extends TypedEventEmitter { } throw error; } finally { - if (timeout !== options?.timeout) timeout?.clear(); + if (options.timeoutContext.clearConnectionCheckoutTimeout) timeout?.clear(); } } diff --git a/src/index.ts b/src/index.ts index 0134027a5f1..31ef5720e10 100644 --- a/src/index.ts +++ b/src/index.ts @@ -542,7 +542,13 @@ export type { RTTSampler, ServerMonitoringMode } from './sdam/monitor'; -export type { Server, ServerEvents, ServerOptions, ServerPrivate } from './sdam/server'; +export type { + Server, + ServerCommandOptions, + ServerEvents, + ServerOptions, + ServerPrivate +} from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, @@ -573,7 +579,15 @@ export type { WithTransactionCallback } from './sessions'; export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort'; -export type { Timeout } from './timeout'; +export type { + CSOTTimeoutContext, + CSOTTimeoutContextOptions, + LegacyTimeoutContext, + LegacyTimeoutContextOptions, + Timeout, + TimeoutContext, + TimeoutContextOptions +} from './timeout'; export type { Transaction, TransactionOptions, TxnState } from './transactions'; export type { BufferPool, diff --git a/src/operations/aggregate.ts 
b/src/operations/aggregate.ts index a5a267ac3e4..50494cbba73 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -3,6 +3,7 @@ import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/r import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -97,7 +98,8 @@ export class AggregateOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options: AggregateOptions = this.options; const serverWireVersion = maxWireVersion(server); @@ -142,6 +144,7 @@ export class AggregateOperation extends CommandOperation { server, session, command, + timeoutContext, this.explain ? ExplainedCursorResponse : CursorResponse ); } diff --git a/src/operations/bulk_write.ts b/src/operations/bulk_write.ts index 0a855644f06..55b61ef73b0 100644 --- a/src/operations/bulk_write.ts +++ b/src/operations/bulk_write.ts @@ -7,6 +7,7 @@ import type { import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { AbstractOperation, Aspect, defineAspects } from './operation'; /** @internal */ @@ -32,11 +33,17 @@ export class BulkWriteOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const operations = this.operations; - const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; + const options = { + ...this.options, + ...this.bsonOptions, + readPreference: this.readPreference, + timeoutContext + }; // Create the bulk operation const bulk: BulkOperationBase = diff --git a/src/operations/command.ts b/src/operations/command.ts index c64b4ae963a..5bd80f796d1 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -7,6 +7,7 @@ import type { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { commandSupportsReadConcern, decorateWithExplain, @@ -112,19 +113,22 @@ export abstract class CommandOperation extends AbstractOperation { server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType: T | undefined ): Promise>; public async executeCommand( server: Server, session: ClientSession | undefined, - cmd: Document + cmd: Document, + timeoutContext: TimeoutContext ): Promise; async executeCommand( server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType?: MongoDBResponseConstructor ): Promise { this.server = server; @@ -132,7 +136,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, - timeout: this.timeout, + timeoutContext, 
readPreference: this.readPreference, session }; diff --git a/src/operations/count.ts b/src/operations/count.ts index 00aae501728..82330a11e76 100644 --- a/src/operations/count.ts +++ b/src/operations/count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -36,7 +37,11 @@ export class CountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const cmd: Document = { count: this.collectionName, @@ -59,7 +64,7 @@ export class CountOperation extends CommandOperation { cmd.maxTimeMS = options.maxTimeMS; } - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return result ? result.n : 0; } } diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index 8edc7e9a1c4..afb2680b9a0 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -9,6 +9,7 @@ import { MongoCompatibilityError } from '../error'; import type { PkFactory } from '../mongo_client'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { CreateIndexesOperation } from './indexes'; import { Aspect, defineAspects } from './operation'; @@ -124,7 +125,11 @@ export class CreateCollectionOperation extends CommandOperation { return 'create' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const name = this.name; const options = this.options; @@ -155,7 +160,7 @@ export class CreateCollectionOperation extends CommandOperation { unique: true } }); - await createOp.executeWithoutEncryptedFieldsCheck(server, session); + await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } if (!options.encryptedFields) { @@ -163,7 +168,7 @@ export class CreateCollectionOperation extends CommandOperation { } } - const coll = await this.executeWithoutEncryptedFieldsCheck(server, session); + const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); if (encryptedFields) { // Create the required index for queryable encryption support. 
@@ -173,7 +178,7 @@ export class CreateCollectionOperation extends CommandOperation { { __safeContent__: 1 }, {} ); - await createIndexOp.execute(server, session); + await createIndexOp.execute(server, session, timeoutContext); } return coll; @@ -181,7 +186,8 @@ export class CreateCollectionOperation extends CommandOperation { private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const db = this.db; const name = this.name; @@ -198,7 +204,7 @@ export class CreateCollectionOperation extends CommandOperation { } } // otherwise just execute the command - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); return new Collection(db, name, options); } } diff --git a/src/operations/delete.ts b/src/operations/delete.ts index f0ef61cb7b1..0e93ead36a2 100644 --- a/src/operations/delete.ts +++ b/src/operations/delete.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoServerError } from '../error'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace } from '../utils'; import { type WriteConcernOptions } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -67,7 +68,8 @@ export class DeleteOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; @@ -95,7 +97,12 @@ export class DeleteOperation extends CommandOperation { } } - const res: TODO_NODE_3286 = await super.executeCommand(server, session, command); + const res: TODO_NODE_3286 = await super.executeCommand( + server, + session, + command, + timeoutContext + ); return res; } } @@ -107,9 +114,10 @@ export class DeleteOneOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -127,9 +135,10 @@ export class DeleteManyOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/distinct.ts b/src/operations/distinct.ts index 4fda285d880..51f2a362d8c 100644 --- a/src/operations/distinct.ts +++ b/src/operations/distinct.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, decorateWithReadConcern } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -42,7 +43,11 @@ export class DistinctOperation extends CommandOperation { return 'distinct' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const key = this.key; const query = this.query; @@ -72,7 +77,7 @@ export class DistinctOperation extends CommandOperation { // Have we specified collation decorateWithCollation(cmd, coll, options); - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return this.explain ? 
result : result.values; } diff --git a/src/operations/drop.ts b/src/operations/drop.ts index 15624d4c07b..787bb6e7d0f 100644 --- a/src/operations/drop.ts +++ b/src/operations/drop.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { MONGODB_ERROR_CODES, MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class DropCollectionOperation extends CommandOperation { return 'drop' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const options = this.options; const name = this.name; @@ -57,7 +62,7 @@ export class DropCollectionOperation extends CommandOperation { // Drop auxilliary collections, ignoring potential NamespaceNotFound errors. const dropOp = new DropCollectionOperation(db, collectionName); try { - await dropOp.executeWithoutEncryptedFieldsCheck(server, session); + await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } catch (err) { if ( !(err instanceof MongoServerError) || @@ -69,14 +74,15 @@ export class DropCollectionOperation extends CommandOperation { } } - return await this.executeWithoutEncryptedFieldsCheck(server, session); + return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - await super.executeCommand(server, session, { drop: this.name }); + await super.executeCommand(server, session, { drop: this.name }, timeoutContext); return true; } } @@ -96,8 +102,12 @@ export class DropDatabaseOperation extends CommandOperation { return 'dropDatabase' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropDatabase: 1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext); return true; } } diff --git a/src/operations/estimated_document_count.ts b/src/operations/estimated_document_count.ts index c1d6c381998..5ab5aa4c305 100644 --- a/src/operations/estimated_document_count.ts +++ b/src/operations/estimated_document_count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -30,7 +31,11 @@ export class EstimatedDocumentCountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd: Document = { count: this.collectionName }; if (typeof 
this.options.maxTimeMS === 'number') { @@ -43,7 +48,7 @@ export class EstimatedDocumentCountOperation extends CommandOperation { cmd.comment = this.options.comment; } - const response = await super.executeCommand(server, session, cmd); + const response = await super.executeCommand(server, session, cmd, timeoutContext); return response?.n || 0; } diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 97e60450739..39937c8abf4 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -24,7 +24,8 @@ import { } from '../sdam/server_selection'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; -import { supportsRetryableWrites } from '../utils'; +import { TimeoutContext } from '../timeout'; +import { squashError, supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -57,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -86,6 +87,12 @@ export async function executeOperation< ); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + const readPreference = operation.readPreference ?? ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -109,7 +116,8 @@ export async function executeOperation< return await tryOperation(operation, { topology, session, - readPreference + readPreference, + timeoutContext }); } finally { if (session?.owner != null && session.owner === owner) { @@ -260,7 +268,7 @@ async function tryOperation< } try { - return await operation.execute(server, session); + return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; diff --git a/src/operations/find.ts b/src/operations/find.ts index 0f81f2d61f2..5f359324d56 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -5,6 +5,7 @@ import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithExplain, type MongoDBNamespace, normalizeHintField } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -98,7 +99,8 @@ export class FindOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { this.server = server; @@ -117,7 +119,7 @@ export class FindOperation extends CommandOperation { ...this.bsonOptions, documentsReturnedIn: 'firstBatch', session, - timeout: this.timeout + timeoutContext }, this.explain ? 
ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/find_and_modify.ts b/src/operations/find_and_modify.ts index 92b17e93b3b..651bcccb626 100644 --- a/src/operations/find_and_modify.ts +++ b/src/operations/find_and_modify.ts @@ -5,6 +5,7 @@ import { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortForCmd } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils'; import { type WriteConcern, type WriteConcernSettings } from '../write_concern'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -180,7 +181,11 @@ export class FindAndModifyOperation extends CommandOperation { return 'findAndModify' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const query = this.query; const options = { ...this.options, ...this.bsonOptions }; @@ -208,7 +213,7 @@ export class FindAndModifyOperation extends CommandOperation { } // Execute the command - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return options.includeResultMetadata ? result : (result.value ?? null); } } diff --git a/src/operations/get_more.ts b/src/operations/get_more.ts index aa550721b6f..34317d533b5 100644 --- a/src/operations/get_more.ts +++ b/src/operations/get_more.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -58,7 +59,8 @@ export class GetMoreOperation extends AbstractOperation { */ override async execute( server: Server, - _session: ClientSession | undefined + _session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Getmore must run on the same server operation began on'); @@ -97,6 +99,7 @@ export class GetMoreOperation extends AbstractOperation { const commandOptions = { returnFieldSelector: null, documentsReturnedIn: 'nextBatch', + timeoutContext, ...this.options }; diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index fda3fa80dd6..c96a5d73453 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -6,6 +6,7 @@ import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils'; import { type CollationOptions, @@ -296,7 +297,11 @@ export class CreateIndexesOperation extends CommandOperation { return 'createIndexes'; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + 
timeoutContext: TimeoutContext + ): Promise { const options = this.options; const indexes = this.indexes; @@ -316,7 +321,7 @@ export class CreateIndexesOperation extends CommandOperation { // collation is set on each index, it should not be defined at the root this.options.collation = undefined; - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); const indexNames = indexes.map(index => index.name || ''); return indexNames; @@ -344,9 +349,13 @@ export class DropIndexOperation extends CommandOperation { return 'dropIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName }; - return await super.executeCommand(server, session, cmd); + return await super.executeCommand(server, session, cmd, timeoutContext); } } @@ -379,7 +388,8 @@ export class ListIndexesOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const serverWireVersion = maxWireVersion(server); @@ -393,7 +403,7 @@ export class ListIndexesOperation extends CommandOperation { command.comment = this.options.comment; } - return await super.executeCommand(server, session, command, CursorResponse); + return await super.executeCommand(server, session, command, timeoutContext, CursorResponse); } } diff --git a/src/operations/insert.ts b/src/operations/insert.ts index 35a050ed1ca..1a40763e313 100644 --- a/src/operations/insert.ts +++ b/src/operations/insert.ts @@ -5,6 +5,7 @@ import { MongoInvalidArgumentError, MongoServerError } from '../error'; import type { InferIdType } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { BulkWriteOperation } from './bulk_write'; @@ -27,7 +28,11 @@ export class InsertOperation extends CommandOperation { return 'insert' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -46,7 +51,7 @@ export class InsertOperation extends CommandOperation { command.comment = options.comment; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } @@ -73,9 +78,10 @@ export class InsertOneOperation extends InsertOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res = await super.execute(server, session); + const res = await super.execute(server, session, timeoutContext); if (res.code) throw new MongoServerError(res); if (res.writeErrors) { // This should be a WriteError but we can't change it now because of error hierarchy @@ -123,7 +129,8 @@ export class InsertManyOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; @@ -137,7 +144,7 @@ export class InsertManyOperation extends AbstractOperation { ); try { - const res = await bulkWriteOperation.execute(server, session); + const res = await bulkWriteOperation.execute(server, session, timeoutContext); return { acknowledged: writeConcern?.w !== 0, insertedCount: res.insertedCount, diff --git a/src/operations/kill_cursors.ts b/src/operations/kill_cursors.ts index 356230e9c7a..72c6a04b276 100644 --- a/src/operations/kill_cursors.ts +++ b/src/operations/kill_cursors.ts @@ -2,6 +2,7 @@ import type { Long } from '../bson'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -29,7 +30,11 @@ export class KillCursorsOperation extends AbstractOperation { return 'killCursors' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Killcursor must run on the same server operation began on'); } @@ -46,7 +51,10 @@ export class KillCursorsOperation extends AbstractOperation { cursors: [this.cursorId] }; try { - await server.command(this.ns, killCursorsCommand, { session }); + await server.command(this.ns, killCursorsCommand, { + session, + timeoutContext + }); } catch (error) { // The driver should never emit errors from killCursors, this is spec-ed behavior squashError(error); diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index e94300f1205..702db0fe3f2 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, 
defineAspects } from './operation'; @@ -54,12 +55,14 @@ export class ListCollectionsOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { return await super.executeCommand( server, session, this.generateCommand(maxWireVersion(server)), + timeoutContext, CursorResponse ); } diff --git a/src/operations/list_databases.ts b/src/operations/list_databases.ts index 5ad9142a1a7..bd740d50c68 100644 --- a/src/operations/list_databases.ts +++ b/src/operations/list_databases.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -41,7 +42,8 @@ export class ListDatabasesOperation extends CommandOperation { const cmd: Document = { listDatabases: 1 }; @@ -63,7 +65,12 @@ export class ListDatabasesOperation extends CommandOperation); + return await (super.executeCommand( + server, + session, + cmd, + timeoutContext + ) as Promise); } } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 0599b72b96d..97e12871ee2 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,7 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type Timeout, type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -79,15 +79,17 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; - - this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
Command name should be stateless (should not use 'this' keyword) */ abstract get commandName(): string; - abstract execute(server: Server, session: ClientSession | undefined): Promise; + abstract execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise; hasAspect(aspect: symbol): boolean { const ctor = this.constructor as { aspects?: Set }; diff --git a/src/operations/profiling_level.ts b/src/operations/profiling_level.ts index 383062c2a40..7c860a244b7 100644 --- a/src/operations/profiling_level.ts +++ b/src/operations/profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -20,8 +21,12 @@ export class ProfilingLevelOperation extends CommandOperation { return 'profile' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - const doc = await super.executeCommand(server, session, { profile: -1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + const doc = await super.executeCommand(server, session, { profile: -1 }, timeoutContext); if (doc.ok === 1) { const was = doc.was; if (was === 0) return 'off'; diff --git a/src/operations/remove_user.ts b/src/operations/remove_user.ts index ced8e4e1cab..7f484ba89a3 100644 --- a/src/operations/remove_user.ts +++ b/src/operations/remove_user.ts @@ -1,6 +1,7 @@ import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -22,8 +23,12 @@ export class RemoveUserOperation extends CommandOperation { return 'dropUser' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropUser: this.username }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropUser: this.username }, timeoutContext); return true; } } diff --git a/src/operations/rename.ts b/src/operations/rename.ts index a27d4afe45a..883be282b64 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class RenameOperation extends CommandOperation { return 'renameCollection' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { // Build the command const renameCollection = this.collection.namespace; const toCollection = 
this.collection.s.namespace.withCollection(this.newName).toString(); @@ -42,7 +47,7 @@ export class RenameOperation extends CommandOperation { dropTarget: dropTarget }; - await super.executeCommand(server, session, command); + await super.executeCommand(server, session, command, timeoutContext); return new Collection(this.collection.s.db, this.newName, this.collection.s.options); } } diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index 56462fa8843..b91e2d0344e 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -5,6 +5,7 @@ import { type TODO_NODE_3286 } from '../mongo_types'; import type { ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { AbstractOperation } from './operation'; @@ -33,7 +34,11 @@ export class RunCommandOperation extends AbstractOperation { return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command( this.ns, @@ -42,7 +47,7 @@ export class RunCommandOperation extends AbstractOperation { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }, this.options.responseType ); @@ -67,13 +72,17 @@ export class RunAdminCommandOperation extends AbstractOperation return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }); return res; } diff --git a/src/operations/search_indexes/create.ts b/src/operations/search_indexes/create.ts index 7e5e55d18d6..9661026e3eb 100644 --- a/src/operations/search_indexes/create.ts +++ b/src/operations/search_indexes/create.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @@ -32,14 +33,21 @@ export class CreateSearchIndexesOperation extends AbstractOperation { return 'createSearchIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { createSearchIndexes: namespace.collection, indexes: this.descriptions }; - const res = await server.command(namespace, command, { session }); + const res = await server.command(namespace, command, { + session, + timeoutContext + }); const indexesCreated: Array<{ name: string }> = res?.indexesCreated ?? 
[]; return indexesCreated.map(({ name }) => name); diff --git a/src/operations/search_indexes/drop.ts b/src/operations/search_indexes/drop.ts index 4e287cca012..e9ea0ad01ce 100644 --- a/src/operations/search_indexes/drop.ts +++ b/src/operations/search_indexes/drop.ts @@ -4,6 +4,7 @@ import type { Collection } from '../../collection'; import { MONGODB_ERROR_CODES, MongoServerError } from '../../error'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class DropSearchIndexOperation extends AbstractOperation { return 'dropSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command: Document = { @@ -31,7 +36,7 @@ export class DropSearchIndexOperation extends AbstractOperation { } try { - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); } catch (error) { const isNamespaceNotFoundError = error instanceof MongoServerError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound; diff --git a/src/operations/search_indexes/update.ts b/src/operations/search_indexes/update.ts index aad7f93536c..e88e777d675 100644 --- a/src/operations/search_indexes/update.ts +++ b/src/operations/search_indexes/update.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class UpdateSearchIndexOperation extends AbstractOperation { return 'updateSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { updateSearchIndex: namespace.collection, @@ -27,7 +32,7 @@ export class UpdateSearchIndexOperation extends AbstractOperation { definition: this.definition }; - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); return; } } diff --git a/src/operations/set_profiling_level.ts b/src/operations/set_profiling_level.ts index 9969b2ea3c2..d76473f2632 100644 --- a/src/operations/set_profiling_level.ts +++ b/src/operations/set_profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { enumToString } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -53,7 +54,8 @@ export class SetProfilingLevelOperation extends CommandOperation override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const level = this.level; @@ -64,7 +66,7 @@ export 
class SetProfilingLevelOperation extends CommandOperation } // TODO(NODE-3483): Determine error to put here - await super.executeCommand(server, session, { profile: this.profile }); + await super.executeCommand(server, session, { profile: this.profile }, timeoutContext); return level; } } diff --git a/src/operations/stats.ts b/src/operations/stats.ts index 41c9faf6e24..aafd3bf1bac 100644 --- a/src/operations/stats.ts +++ b/src/operations/stats.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -24,13 +25,17 @@ export class DbStatsOperation extends CommandOperation { return 'dbStats' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const command: Document = { dbStats: true }; if (this.options.scale != null) { command.scale = this.options.scale; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } diff --git a/src/operations/update.ts b/src/operations/update.ts index ba0ad6d95ff..5b6f396afec 100644 --- a/src/operations/update.ts +++ b/src/operations/update.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoInvalidArgumentError, MongoServerError } import type { InferIdType, TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { hasAtomicOperators, type MongoDBNamespace } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -91,7 +92,11 @@ export class UpdateOperation extends CommandOperation { return this.statements.every(op => op.multi == null || op.multi === false); } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -122,7 +127,7 @@ export class UpdateOperation extends CommandOperation { } } - const res = await super.executeCommand(server, session, command); + const res = await super.executeCommand(server, session, command, timeoutContext); return res; } } @@ -143,9 +148,10 @@ export class UpdateOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -177,9 +183,10 @@ export class UpdateManyOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -230,9 +237,10 @@ export class ReplaceOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/validate_collection.ts b/src/operations/validate_collection.ts index 4880a703a7a..16ae4cad9e0 100644 --- a/src/operations/validate_collection.ts +++ b/src/operations/validate_collection.ts @@ -3,6 +3,7 @@ import type { Document } from '../bson'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -37,10 +38,14 @@ export class ValidateCollectionOperation extends CommandOperation { return 'validate' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const collectionName = this.collectionName; - const doc = await super.executeCommand(server, session, this.command); + const doc = await super.executeCommand(server, session, this.command, timeoutContext); if (doc.result != null && typeof doc.result !== 'string') throw new MongoUnexpectedServerResponseError('Error with validation data'); if (doc.result != null && doc.result.match(/exception|corrupt/) != null) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index a1b885382ec..20cb13423c4 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -40,6 +40,7 @@ import type { ServerApi } from '../mongo_client'; import { TypedEventEmitter } from '../mongo_types'; import type { GetMoreOptions 
} from '../operations/get_more'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isTransactionCommand } from '../transactions'; import { type EventEmitterWithState, @@ -104,6 +105,11 @@ export type ServerEvents = { } & ConnectionPoolEvents & EventEmitterWithState; +/** @internal */ +export type ServerCommandOptions = Omit & { + timeoutContext: TimeoutContext; +}; + /** @internal */ export class Server extends TypedEventEmitter { /** @internal */ @@ -267,20 +273,20 @@ export class Server extends TypedEventEmitter { public async command( ns: MongoDBNamespace, command: Document, - options: CommandOptions | undefined, + options: ServerCommandOptions, responseType: T | undefined ): Promise>; public async command( ns: MongoDBNamespace, command: Document, - options?: CommandOptions + options: ServerCommandOptions ): Promise; public async command( ns: MongoDBNamespace, cmd: Document, - options: CommandOptions, + options: ServerCommandOptions, responseType?: MongoDBResponseConstructor ): Promise { if (ns.db == null || typeof ns === 'string') { diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 4c9d71d807d..6117b5317cd 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -34,11 +34,10 @@ import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mong import { TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; -import { Timeout, TimeoutError } from '../timeout'; +import { Timeout, TimeoutContext, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, - csotMin, type EventEmitterWithState, HostAddress, List, @@ -179,8 +178,11 @@ export interface SelectServerOptions { session?: ClientSession; operationName: string; previousServer?: ServerDescription; - /** @internal*/ - timeout?: Timeout; + /** + * @internal + * TODO(NODE-5685): Make this required + * */ + timeoutContext?: TimeoutContext; } /** @public */ @@ -458,13 +460,20 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.options.timeoutMS; - const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; + const timeoutMS = this.client.s.options.timeoutMS; + const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? ReadPreference.primary; + + const timeoutContext = TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS, + waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS + }); + const selectServerOptions = { operationName: 'ping', - timeout, - ...options + ...options, + timeoutContext }; try { const server = await this.selectServer( @@ -474,7 +483,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -563,24 +572,10 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } - const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 
0; - let timeout: Timeout | null; - if (options.timeout) { - // CSOT Enabled - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - timeout = options.timeout; - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); - } - } else { - timeout = null; - } - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); + let timeout; + if (options.timeoutContext) timeout = options.timeoutContext.serverSelectionTimeout; + else { + timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); } const isSharded = this.description.type === TopologyType.Sharded; @@ -604,7 +599,7 @@ export class Topology extends TypedEventEmitter { ) ); } - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); return transaction.server; } @@ -654,7 +649,7 @@ export class Topology extends TypedEventEmitter { ); } - if (options.timeout) { + if (options.timeoutContext?.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during server selection', { cause: timeoutError }); @@ -664,7 +659,7 @@ export class Topology extends TypedEventEmitter { // Other server selection error throw error; } finally { - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); } } /** diff --git a/src/timeout.ts b/src/timeout.ts index 7af1a23f261..3d65992a02b 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,7 +1,7 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError } from './error'; -import { noop } from './utils'; +import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { @@ -107,3 +107,165 @@ export class Timeout extends Promise { ); } } + +/** @internal */ +export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; + +/** @internal */ +export type LegacyTimeoutContextOptions = { + serverSelectionTimeoutMS: number; + waitQueueTimeoutMS: number; + socketTimeoutMS?: number; +}; + +/** @internal */ +export type CSOTTimeoutContextOptions = { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; +}; + +function isLegacyTimeoutContextOptions(v: unknown): v is LegacyTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'waitQueueTimeoutMS' in v && + typeof v.waitQueueTimeoutMS === 'number' + ); +} + +function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'timeoutMS' in v && + typeof v.timeoutMS === 'number' + ); +} + +/** @internal */ +export abstract class TimeoutContext { + static create(options: TimeoutContextOptions): TimeoutContext { + if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); + else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); + else throw new MongoRuntimeError('Unrecognized options'); + } + + abstract get serverSelectionTimeout(): Timeout | null; + + abstract get connectionCheckoutTimeout(): Timeout | null; + + 
abstract get clearServerSelectionTimeout(): boolean; + + abstract get clearConnectionCheckoutTimeout(): boolean; + + abstract csotEnabled(): this is CSOTTimeoutContext; +} + +/** @internal */ +export class CSOTTimeoutContext extends TimeoutContext { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; + + clearConnectionCheckoutTimeout: boolean; + clearServerSelectionTimeout: boolean; + + private _maxTimeMS?: number; + + private _serverSelectionTimeout?: Timeout | null; + private _connectionCheckoutTimeout?: Timeout | null; + + constructor(options: CSOTTimeoutContextOptions) { + super(); + this.timeoutMS = options.timeoutMS; + + this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; + + this.socketTimeoutMS = options.socketTimeoutMS; + + this.clearServerSelectionTimeout = false; + this.clearConnectionCheckoutTimeout = true; + } + + get maxTimeMS(): number { + return this._maxTimeMS ?? -1; + } + + set maxTimeMS(v: number) { + this._maxTimeMS = v; + } + + csotEnabled(): this is CSOTTimeoutContext { + return true; + } + + get serverSelectionTimeout(): Timeout | null { + // check for undefined + if (typeof this._serverSelectionTimeout !== 'object') { + const usingServerSelectionTimeoutMS = + this.serverSelectionTimeoutMS !== 0 && + csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; + + if (usingServerSelectionTimeoutMS) { + this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + } else { + if (this.timeoutMS > 0) { + this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + } else { + this._serverSelectionTimeout = null; + } + } + } + + return this._serverSelectionTimeout; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (typeof this._connectionCheckoutTimeout !== 'object') { + if (typeof this._serverSelectionTimeout === 'object') { + // null or Timeout + this._connectionCheckoutTimeout = this._serverSelectionTimeout; + } else { + throw new MongoRuntimeError( + 'Unreachable. 
If you are seeing this error, please file a ticket on the NODE driver project on Jira' + ); + } + } + return this._connectionCheckoutTimeout; + } +} + +/** @internal */ +export class LegacyTimeoutContext extends TimeoutContext { + options: LegacyTimeoutContextOptions; + clearServerSelectionTimeout: boolean; + clearConnectionCheckoutTimeout: boolean; + + constructor(options: LegacyTimeoutContextOptions) { + super(); + this.options = options; + this.clearServerSelectionTimeout = true; + this.clearConnectionCheckoutTimeout = true; + } + + csotEnabled(): this is CSOTTimeoutContext { + return false; + } + + get serverSelectionTimeout(): Timeout | null { + if (this.options.serverSelectionTimeoutMS != null && this.options.serverSelectionTimeoutMS > 0) + return Timeout.expires(this.options.serverSelectionTimeoutMS); + return null; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (this.options.waitQueueTimeoutMS != null && this.options.waitQueueTimeoutMS > 0) + return Timeout.expires(this.options.waitQueueTimeoutMS); + return null; + } +} diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index c1426d8db1d..c4989f58d7f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -33,16 +33,20 @@ describe('CSOT spec unit tests', function () { client = this.configuration.newClient({ timeoutMS: 1000 }); // Spy on connection checkout and pull options argument const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); - const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); const expiresSpy = sinon.spy(Timeout, 'expires'); await client.db('db').collection('collection').insertOne({ x: 1 }); expect(checkoutSpy).to.have.been.calledOnce; - expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + const timeoutContext = checkoutSpy.lastCall.args[0].timeoutContext; + expect(timeoutContext).to.exist; // Check that we passed through the timeout - expect(checkoutSpy.firstCall.args[0].timeout).to.equal( - selectServerSpy.lastCall.lastArg.timeout + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.be.instanceOf(Timeout); + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.equal( + // @ts-expect-error accessing private properties + timeoutContext._connectionCheckoutTimeout ); // Check that no more Timeouts are constructed after we enter checkout diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 5636eb00db7..17d85ba5b23 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -143,7 +143,7 @@ describe('CSOT driver tests', () => { }); it('throws a MongoOperationTimeoutError', { - metadata: { requires: { mongodb: '>=4.4' } }, + metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index f6d7e68bedc..9bb2abdb87a 100644 --- a/test/tools/cmap_spec_runner.ts +++ 
b/test/tools/cmap_spec_runner.ts @@ -12,7 +12,8 @@ import { makeClientMetadata, type MongoClient, type Server, - shuffle + shuffle, + TimeoutContext } from '../mongodb'; import { isAnyRequirementSatisfied } from './unified-spec-runner/unified-utils'; import { type FailPoint, sleep } from './utils'; @@ -185,7 +186,14 @@ const compareInputToSpec = (input, expected, message) => { const getTestOpDefinitions = (threadContext: ThreadContext) => ({ checkOut: async function (op) { - const connection: Connection = await ConnectionPool.prototype.checkOut.call(threadContext.pool); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: threadContext.pool.options.waitQueueTimeoutMS + }); + const connection: Connection = await ConnectionPool.prototype.checkOut.call( + threadContext.pool, + { timeoutContext } + ); if (op.label != null) { threadContext.connections.set(op.label, connection); } else { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 18048befab4..1604cd82d86 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -10,8 +10,10 @@ const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); const { MongoClientAuthProviders } = require('../../mongodb'); +const { TimeoutContext } = require('../../mongodb'); describe('Connection Pool', function () { + let timeoutContext; let mockMongod; const stubServer = { topology: { @@ -44,6 +46,10 @@ describe('Connection Pool', function () { }) ); + beforeEach(() => { + timeoutContext = TimeoutContext.create({ waitQueueTimeoutMS: 0, serverSelectionTimeoutMS: 0 }); + }); + it('should destroy connections which have been closed', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; @@ -64,8 +70,10 @@ describe('Connection Pool', function () { const events = []; pool.on('connectionClosed', event => events.push(event)); - const conn = await pool.checkOut(); - const error = await conn.command(ns('admin.$cmd'), { ping: 1 }, {}).catch(error => error); + const conn = await pool.checkOut({ timeoutContext }); + const error = await conn + .command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }) + .catch(error => error); expect(error).to.be.instanceOf(Error); pool.checkIn(conn); @@ -93,7 +101,7 @@ describe('Connection Pool', function () { pool.ready(); - const conn = await pool.checkOut(); + const conn = await pool.checkOut({ timeoutContext }); const maybeError = await conn.command(ns('admin.$cmd'), { ping: 1 }, undefined).catch(e => e); expect(maybeError).to.be.instanceOf(MongoError); expect(maybeError).to.match(/timed out/); @@ -114,11 +122,15 @@ describe('Connection Pool', function () { waitQueueTimeoutMS: 200, hostAddress: mockMongod.hostAddress() }); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 200, + serverSelectionTimeoutMS: 0 + }); pool.ready(); - const conn = await pool.checkOut(); - const err = await pool.checkOut().catch(e => e); + const conn = await pool.checkOut({ timeoutContext }); + const err = await pool.checkOut({ timeoutContext }).catch(e => e); expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); sinon.stub(pool, 'availableConnectionCount').get(() => 0); pool.checkIn(conn); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 6bab40d0318..bdc049cbc4f 100644 --- a/test/unit/error.test.ts +++ 
b/test/unit/error.test.ts @@ -28,6 +28,7 @@ import { ns, PoolClosedError as MongoPoolClosedError, setDifference, + TimeoutContext, type TopologyDescription, type TopologyOptions, WaitQueueTimeoutError as MongoWaitQueueTimeoutError @@ -376,11 +377,17 @@ describe('MongoErrors', () => { { replicaSet: 'rs' } as TopologyOptions ); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); return replSet .connect() - .then(topology => topology.selectServer('primary', {})) + .then(topology => topology.selectServer('primary', { timeoutContext })) .then(server => - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { + timeoutContext + }) ) .then( () => expect.fail('expected command to fail'), @@ -419,10 +426,14 @@ describe('MongoErrors', () => { if (err) { return cleanup(err); } + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - topology.selectServer('primary', {}).then(server => { + topology.selectServer('primary', { timeoutContext }).then(server => { server - .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { timeoutContext }) .then(expect.fail, err => { let _err; try { diff --git a/test/unit/operations/get_more.test.ts b/test/unit/operations/get_more.test.ts index f79da44e22f..17bc20f6fa7 100644 --- a/test/unit/operations/get_more.test.ts +++ b/test/unit/operations/get_more.test.ts @@ -69,7 +69,7 @@ describe('GetMoreOperation', function () { const call = stub.getCall(0); expect(call.args[0]).to.equal(namespace); expect(call.args[1]).to.deep.equal(expectedGetMoreCommand); - expect(call.args[2]).to.deep.equal(opts); + expect(call.args[2]).to.containSubset(opts); }); }); diff --git a/test/unit/sdam/topology.test.ts b/test/unit/sdam/topology.test.ts index e4a34417d50..5264b5d9c45 100644 --- a/test/unit/sdam/topology.test.ts +++ b/test/unit/sdam/topology.test.ts @@ -17,6 +17,7 @@ import { Server, SrvPoller, SrvPollingEvent, + TimeoutContext, Topology, TopologyDescription, TopologyDescriptionChangedEvent, @@ -108,17 +109,28 @@ describe('Topology (unit)', function () { const topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); topology.connect().then(() => { - topology.selectServer('primary', {}).then(server => { - server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }).then( - () => expect.fail('expected command to fail'), - err => { - expect(err).to.exist; - expect(err).to.match(/timed out/); - topology.close(); - done(); - } - ); - }, expect.fail); + const ctx = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0, + socketTimeoutMS: 250 + }); + topology + .selectServer('primary', { + timeoutContext: ctx + }) + .then(server => { + server + .command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250, timeoutContext: ctx }) + .then( + () => expect.fail('expected command to fail'), + err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + topology.close(); + done(); + } + ); + }, expect.fail); }, expect.fail); }); }); @@ -217,10 +229,16 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const timeoutContext = 
TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.true; }); @@ -245,11 +263,17 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.false; topology.close(); @@ -269,14 +293,20 @@ describe('Topology (unit)', function () { topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); await topology.connect(); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); const server = await topology.selectServer('primary', {}); let serverDescription; server.on('descriptionReceived', sd => (serverDescription = sd)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(server.description.type).to.equal('Unknown'); }); diff --git a/test/unit/timeout.test.ts b/test/unit/timeout.test.ts index 3fafc21b35f..119d0516a9c 100644 --- a/test/unit/timeout.test.ts +++ b/test/unit/timeout.test.ts @@ -1,6 +1,14 @@ import { expect } from 'chai'; -import { MongoInvalidArgumentError, Timeout, TimeoutError } from '../mongodb'; +import { + CSOTTimeoutContext, + LegacyTimeoutContext, + MongoInvalidArgumentError, + MongoRuntimeError, + Timeout, + TimeoutContext, + TimeoutError +} from '../mongodb'; describe('Timeout', function () { let timeout: Timeout; @@ -115,3 +123,197 @@ describe('Timeout', function () { }); }); }); + +describe('TimeoutContext', function () { + describe('TimeoutContext.create', function () { + context('when timeoutMS is a number', function () { + it('returns a CSOTTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(CSOTTimeoutContext); + }); + }); + + context('when timeoutMS is undefined', function () { + it('returns a LegacyTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(LegacyTimeoutContext); + }); + }); + }); + + describe('CSOTTimeoutContext', function () { + let ctx: CSOTTimeoutContext; + + describe('get serverSelectionTimeout()', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is 0', function () { + it('returns null', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); + + expect(ctx.serverSelectionTimeout).to.be.null; + }); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is >0', function () { + it('returns a Timeout instance 
with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + }); + + context( + 'when timeoutMS is >0 serverSelectionTimeoutMS is >0 and timeoutMS > serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 15, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + } + ); + + context( + 'when timeoutMS is >0, serverSelectionTimeoutMS is >0 and timeoutMS < serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to timeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 10, + serverSelectionTimeoutMS: 15 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.timeoutMS); + }); + } + ); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when called before get serverSelectionTimeout()', function () { + it('throws a MongoRuntimeError', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 15 + }); + + expect(() => ctx.connectionCheckoutTimeout).to.throw(MongoRuntimeError); + }); + }); + + context('when called after get serverSelectionTimeout()', function () { + let serverSelectionTimeout: Timeout; + let connectionCheckoutTimeout: Timeout; + + afterEach(() => { + serverSelectionTimeout.clear(); + connectionCheckoutTimeout.clear(); + }); + + it('returns same timeout as serverSelectionTimeout', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 86 + }); + serverSelectionTimeout = ctx.serverSelectionTimeout; + connectionCheckoutTimeout = ctx.connectionCheckoutTimeout; + + expect(connectionCheckoutTimeout).to.exist; + expect(connectionCheckoutTimeout).to.equal(serverSelectionTimeout); + }); + }); + }); + }); + + describe('LegacyTimeoutContext', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + describe('get serverSelectionTimeout()', function () { + context('when serverSelectionTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 100, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.serverSelectionTimeoutMS); + }); + }); + + context('when serverSelectionTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.null; + }); + }); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when waitQueueTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to waitQueueTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + 
waitQueueTimeoutMS: 20 + }); + timeout = ctx.connectionCheckoutTimeout; + + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.waitQueueTimeoutMS); + }); + }); + + context('when waitQueueTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + waitQueueTimeoutMS: 0 + }); + + expect(ctx.connectionCheckoutTimeout).to.be.null; + }); + }); + }); + }); +}); From e4efd3fde0807c9afd33c44c7d700aa074942008 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 22 Jul 2024 11:17:22 -0400 Subject: [PATCH 004/136] refactor(NODE-6230): executeOperation to use iterative retry mechanism (#4157) --- src/cmap/connection_pool.ts | 6 ++++-- src/operations/execute_operation.ts | 27 ++++++++++++++++----------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5369cc155aa..2cd2bcc2c19 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -28,7 +28,7 @@ import { import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { type TimeoutContext, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -356,6 +356,7 @@ export class ConnectionPool extends TypedEventEmitter { * explicitly destroyed by the new owner. */ async checkOut(options: { timeoutContext: TimeoutContext }): Promise { + const checkoutTime = now(); this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) @@ -367,7 +368,8 @@ export class ConnectionPool extends TypedEventEmitter { const waitQueueMember: WaitQueueMember = { resolve, - reject + reject, + checkoutTime }; this[kWaitQueue].push(waitQueueMember); diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 39937c8abf4..efd92f19de3 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -25,7 +25,7 @@ import { import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; import { TimeoutContext } from '../timeout'; -import { squashError, supportsRetryableWrites } from '../utils'; +import { supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -87,12 +87,6 @@ export async function executeOperation< ); } - timeoutContext ??= TimeoutContext.create({ - serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, - waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, - timeoutMS: operation.options.timeoutMS - }); - const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -112,12 +106,18 @@ export async function executeOperation< session.unpin(); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + try { return await tryOperation(operation, { topology, + timeoutContext, session, - readPreference, - timeoutContext + readPreference }); } finally { if (session?.owner != null && session.owner === owner) { @@ -156,6 +156,7 @@ type RetryOptions = { session: ClientSession | undefined; readPreference: ReadPreference; topology: Topology; + timeoutContext: TimeoutContext; }; /** @@ -179,7 +180,10 @@ type RetryOptions = { async function tryOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(operation: T, { topology, session, readPreference }: RetryOptions): Promise { +>( + operation: T, + { topology, timeoutContext, session, readPreference }: RetryOptions +): Promise { let selector: ReadPreference | ServerSelector; if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) { @@ -197,7 +201,8 @@ async function tryOperation< let server = await topology.selectServer(selector, { session, - operationName: operation.commandName + operationName: operation.commandName, + timeoutContext }); const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION); From 22082c9563d1f913b0b87f8f3c9ddff86427adfe Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 26 Jul 2024 09:55:20 -0400 Subject: [PATCH 005/136] feat(NODE-5682): set maxTimeMS on commands and preempt I/O (#4174) Co-authored-by: Warren James --- src/admin.ts | 5 +- src/cmap/connection.ts | 66 ++++++++++++++++--- src/cmap/wire_protocol/on_data.ts | 17 ++++- src/db.ts | 2 +- src/sdam/topology.ts | 17 +++-- src/timeout.ts | 43 ++++++++++-- ...ient_side_operations_timeout.prose.test.ts | 20 +++--- ...lient_side_operations_timeout.spec.test.ts | 33 +++++++++- .../node_csot.test.ts | 1 - test/integration/node-specific/db.test.js | 22 ++----- test/spec/{index.js => index.ts} | 19 ++---- test/tools/cmap_spec_runner.ts | 3 +- test/tools/unified-spec-runner/entities.ts | 4 +- test/tools/unified-spec-runner/match.ts | 15 ++++- test/tools/unified-spec-runner/operations.ts | 8 +-- test/unit/tools/unified_spec_runner.test.ts | 2 +- 16 files changed, 200 insertions(+), 77 deletions(-) rename test/spec/{index.js => index.ts} (67%) diff --git a/src/admin.ts b/src/admin.ts index e030384eafc..0f03023a95c 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -155,7 +155,10 @@ export class Admin { * @param options - Optional settings for the command */ async listDatabases(options?: ListDatabasesOptions): Promise { - return await executeOperation(this.s.db.client, new ListDatabasesOperation(this.s.db, options)); + return await executeOperation( + this.s.db.client, + new ListDatabasesOperation(this.s.db, { timeoutMS: this.s.db.timeoutMS, ...options }) + ); } /** diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 9defa144f3c..e3f1b0d753d 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -19,6 +19,7 @@ import { MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, MongoServerError, MongoUnexpectedServerResponseError @@ -30,7 +31,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from 
'../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type TimeoutContext } from '../timeout'; +import { type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -417,6 +418,11 @@ export class Connection extends TypedEventEmitter { ...options }; + if (options.timeoutContext?.csotEnabled()) { + const { maxTimeMS } = options.timeoutContext; + if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + } + const message = this.supportsOpMsg ? new OpMsgRequest(db, cmd, commandOptions) : new OpQueryRequest(db, cmd, commandOptions); @@ -431,7 +437,9 @@ export class Connection extends TypedEventEmitter { ): AsyncGenerator { this.throwIfAborted(); - if (typeof options.socketTimeoutMS === 'number') { + if (options.timeoutContext?.csotEnabled()) { + this.socket.setTimeout(0); + } else if (typeof options.socketTimeoutMS === 'number') { this.socket.setTimeout(options.socketTimeoutMS); } else if (this.socketTimeoutMS !== 0) { this.socket.setTimeout(this.socketTimeoutMS); @@ -440,7 +448,8 @@ export class Connection extends TypedEventEmitter { try { await this.writeCommand(message, { agreedCompressor: this.description.compressor ?? 'none', - zlibCompressionLevel: this.description.zlibCompressionLevel + zlibCompressionLevel: this.description.zlibCompressionLevel, + timeoutContext: options.timeoutContext }); if (options.noResponse) { @@ -450,7 +459,17 @@ export class Connection extends TypedEventEmitter { this.throwIfAborted(); - for await (const response of this.readMany()) { + if ( + options.timeoutContext?.csotEnabled() && + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + + for await (const response of this.readMany({ timeoutContext: options.timeoutContext })) { this.socket.setTimeout(0); const bson = response.parse(); @@ -623,7 +642,11 @@ export class Connection extends TypedEventEmitter { */ private async writeCommand( command: WriteProtocolMessageType, - options: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number } + options: { + agreedCompressor?: CompressorName; + zlibCompressionLevel?: number; + timeoutContext?: TimeoutContext; + } ): Promise { const finalCommand = options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command) @@ -635,8 +658,32 @@ export class Connection extends TypedEventEmitter { const buffer = Buffer.concat(await finalCommand.toBin()); + if (options.timeoutContext?.csotEnabled()) { + if ( + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + } + if (this.socket.write(buffer)) return; - return await once(this.socket, 'drain'); + + const drainEvent = once(this.socket, 'drain'); + const timeout = options?.timeoutContext?.timeoutForSocketWrite; + if (timeout) { + try { + return await Promise.race([drainEvent, timeout]); + } catch (error) { + if (TimeoutError.is(error)) { + throw new MongoOperationTimeoutError('Timed out at socket write'); + } + throw error; + } + } + return await drainEvent; } /** @@ -648,9 +695,12 @@ export class Connection extends TypedEventEmitter { * * Note 
that `for-await` loops call `return` automatically when the loop is exited. */ - private async *readMany(): AsyncGenerator { + private async *readMany(options: { + timeoutContext?: TimeoutContext; + }): AsyncGenerator { try { - this.dataEvents = onData(this.messageStream); + this.dataEvents = onData(this.messageStream, options); + for await (const message of this.dataEvents) { const response = await decompressResponse(message); yield response; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index b99c950d96f..a32c6b1b484 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,5 +1,7 @@ import { type EventEmitter } from 'events'; +import { MongoOperationTimeoutError } from '../../error'; +import { type TimeoutContext, TimeoutError } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -18,7 +20,10 @@ type PendingPromises = Omit< * Returns an AsyncIterator that iterates each 'data' event emitted from emitter. * It will reject upon an error event. */ -export function onData(emitter: EventEmitter) { +export function onData( + emitter: EventEmitter, + { timeoutContext }: { timeoutContext?: TimeoutContext } +) { // Setup pending events and pending promise lists /** * When the caller has not yet called .next(), we store the @@ -86,6 +91,8 @@ export function onData(emitter: EventEmitter) { // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + // eslint-disable-next-line github/no-then + timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -97,8 +104,12 @@ export function onData(emitter: EventEmitter) { function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - if (promise != null) promise.reject(err); - else error = err; + const timeoutError = TimeoutError.is(err) + ? new MongoOperationTimeoutError('Timed out during socket read') + : undefined; + + if (promise != null) promise.reject(timeoutError ?? err); + else error = timeoutError ?? err; void closeHandler(); } diff --git a/src/db.ts b/src/db.ts index 538c8d2c9b4..07a0c928cca 100644 --- a/src/db.ts +++ b/src/db.ts @@ -277,7 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS, + timeoutMS: options?.timeoutMS ?? this.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 6117b5317cd..479003f0e35 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -460,29 +460,28 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.s.options.timeoutMS; + // TODO(NODE-6223): auto connect cannot use timeoutMS + // const timeoutMS = this.client.s.options.timeoutMS; const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const timeoutContext = TimeoutContext.create({ - timeoutMS, + timeoutMS: undefined, serverSelectionTimeoutMS, waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS }); - const selectServerOptions = { operationName: 'ping', ...options, timeoutContext }; + try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), selectServerOptions ); - const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; - if (!skipPingOnConnect && server && this.s.credentials) { + if (!skipPingOnConnect && this.s.credentials) { await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); @@ -623,7 +622,11 @@ export class Topology extends TypedEventEmitter { try { timeout?.throwIfExpired(); - return await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + const server = await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + if (options.timeoutContext?.csotEnabled() && server.description.minRoundTripTime !== 0) { + options.timeoutContext.minRoundTripTime = server.description.minRoundTripTime; + } + return server; } catch (error) { if (TimeoutError.is(error)) { // Timeout diff --git a/src/timeout.ts b/src/timeout.ts index 3d65992a02b..cc90b8c2e72 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,6 +1,6 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; import { csotMin, noop } from './utils'; /** @internal */ @@ -51,7 +51,7 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = false) { + private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; if (duration < 0) { @@ -163,6 +163,10 @@ export abstract class TimeoutContext { abstract get clearConnectionCheckoutTimeout(): boolean; + abstract get timeoutForSocketWrite(): Timeout | null; + + abstract get timeoutForSocketRead(): Timeout | null; + abstract csotEnabled(): this is CSOTTimeoutContext; } @@ -175,13 +179,15 @@ export class CSOTTimeoutContext extends TimeoutContext { clearConnectionCheckoutTimeout: boolean; clearServerSelectionTimeout: boolean; - private _maxTimeMS?: number; - private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; + public minRoundTripTime = 0; + private start: number; constructor(options: CSOTTimeoutContextOptions) { super(); + this.start = Math.trunc(performance.now()); + this.timeoutMS = options.timeoutMS; this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; @@ -193,11 +199,12 @@ export class CSOTTimeoutContext extends TimeoutContext { } get maxTimeMS(): number { - return this._maxTimeMS ?? -1; + return this.remainingTimeMS - this.minRoundTripTime; } - set maxTimeMS(v: number) { - this._maxTimeMS = v; + get remainingTimeMS() { + const timePassed = Math.trunc(performance.now()) - this.start; + return this.timeoutMS <= 0 ? 
Infinity : this.timeoutMS - timePassed; } csotEnabled(): this is CSOTTimeoutContext { @@ -238,6 +245,20 @@ export class CSOTTimeoutContext extends TimeoutContext { } return this._connectionCheckoutTimeout; } + + get timeoutForSocketWrite(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket write'); + } + + get timeoutForSocketRead(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket read'); + } } /** @internal */ @@ -268,4 +289,12 @@ export class LegacyTimeoutContext extends TimeoutContext { return Timeout.expires(this.options.waitQueueTimeoutMS); return null; } + + get timeoutForSocketWrite(): Timeout | null { + return null; + } + + get timeoutForSocketRead(): Timeout | null { + return null; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 903ea9c3bb4..729bed42199 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -384,7 +384,7 @@ describe('CSOT spec prose tests', function () { clock.restore(); }); - it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { + it.skip('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. @@ -416,10 +416,11 @@ describe('CSOT spec prose tests', function () { await clock.tickAsync(11); expect(await maybeError).to.be.instanceof(MongoServerSelectionError); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); - it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -440,9 +441,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. 
@@ -462,9 +464,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { + it.skip('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -484,7 +487,8 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 2e2cd0fa8e5..f73f162204f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -3,7 +3,34 @@ import { join } from 'path'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -// TODO(NODE-5823): Implement unified runner operations and options support for CSOT -describe.skip('CSOT spec tests', function () { - runUnifiedSuite(loadSpecTests(join('client-side-operations-timeout'))); +const enabled = [ + 'override-collection-timeoutMS', + 'override-database-timeoutMS', + 'override-operation-timeoutMS' +]; + +const cursorOperations = [ + 'aggregate', + 'countDocuments', + 'listIndexes', + 'createChangeStream', + 'listCollections', + 'listCollectionNames' +]; + +describe('CSOT spec tests', function () { + const specs = loadSpecTests(join('client-side-operations-timeout')); + for (const spec of specs) { + for (const test of spec.tests) { + // not one of the test suites listed in kickoff + if (!enabled.includes(spec.name)) { + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + + // Cursor operation + if (test.operations.find(operation => cursorOperations.includes(operation.name))) + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + } + runUnifiedSuite(specs); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 17d85ba5b23..0c97b910836 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -48,7 +48,6 @@ describe('CSOT driver tests', () => { afterEach(async () => { await cursor?.close(); await session?.endSession(); - await session.endSession(); }); it('throws an error', async () => { diff --git a/test/integration/node-specific/db.test.js b/test/integration/node-specific/db.test.js index 338e136c12c..a092a8d888b 100644 --- a/test/integration/node-specific/db.test.js +++ b/test/integration/node-specific/db.test.js @@ -45,22 +45,12 @@ 
describe('Db', function () { }); }); - it('shouldCorrectlyHandleFailedConnection', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var configuration = this.configuration; - var fs_client = configuration.newClient('mongodb://127.0.0.1:25117/test', { - serverSelectionTimeoutMS: 10 - }); - - fs_client.connect(function (err) { - test.ok(err != null); - done(); - }); - } + it('should correctly handle failed connection', async function () { + const client = this.configuration.newClient('mongodb://iLoveJS', { + serverSelectionTimeoutMS: 10 + }); + const error = await client.connect().catch(error => error); + expect(error).to.be.instanceOf(Error); }); it('shouldCorrectlyGetErrorDroppingNonExistingDb', { diff --git a/test/spec/index.js b/test/spec/index.ts similarity index 67% rename from test/spec/index.js rename to test/spec/index.ts index f9e6dccf02f..221d6671893 100644 --- a/test/spec/index.js +++ b/test/spec/index.ts @@ -1,7 +1,7 @@ -'use strict'; -const path = require('path'); -const fs = require('fs'); -const { EJSON } = require('bson'); +import * as fs from 'fs'; +import * as path from 'path'; + +import { EJSON } from '../mongodb'; function hasDuplicates(testArray) { const testNames = testArray.map(test => test.description); @@ -12,17 +12,16 @@ function hasDuplicates(testArray) { /** * Given spec test folder names, loads the corresponding JSON * - * @param {...string} args - the spec test name to load - * @returns {any[]} + * @param args - the spec test name to load */ -function loadSpecTests(...args) { +export function loadSpecTests(...args: string[]): any[] { const specPath = path.resolve(...[__dirname].concat(args)); const suites = fs .readdirSync(specPath) .filter(x => x.includes('.json')) .map(x => ({ - ...EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), + ...EJSON.parse(fs.readFileSync(path.join(specPath, x), 'utf8'), { relaxed: true }), name: path.basename(x, '.json') })); @@ -36,7 +35,3 @@ function loadSpecTests(...args) { return suites; } - -module.exports = { - loadSpecTests -}; diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index 9bb2abdb87a..892f6311df5 100644 --- a/test/tools/cmap_spec_runner.ts +++ b/test/tools/cmap_spec_runner.ts @@ -1,6 +1,7 @@ import { expect } from 'chai'; import { EventEmitter } from 'events'; import { clearTimeout, setTimeout } from 'timers'; +import { inspect } from 'util'; import { addContainerMetadata, @@ -427,7 +428,7 @@ async function runCmapTest(test: CmapTest, threadContext: ThreadContext) { } compareInputToSpec(actualError, errorPropsToCheck, `failed while checking ${errorType}`); } else { - expect(actualError).to.not.exist; + expect(actualError, inspect(actualError)).to.not.exist; } const actualEvents = threadContext.poolEvents.filter( diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 65b5242cf06..9f4e20a828e 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -44,7 +44,7 @@ import { type TopologyOpeningEvent, WriteConcern } from '../../mongodb'; -import { ejson, getEnvironmentalOptions } from '../../tools/utils'; +import { getEnvironmentalOptions } from '../../tools/utils'; import type { TestConfiguration } from '../runner/config'; import { EntityEventRegistry } from './entity_event_registry'; import { trace } from './runner'; @@ -590,7 +590,7 @@ export class EntitiesMap extends Map { new EntityEventRegistry(client, 
entity.client, map).register(); await client.connect(); } catch (error) { - console.error(ejson`failed to connect entity ${entity}`); + console.error('failed to connect entity', entity); // In the case where multiple clients are defined in the test and any one of them failed // to connect, but others did succeed, we need to ensure all open clients are closed. const clients = map.mapOf('client'); diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 32f6870d998..fb1a759bf52 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -173,7 +173,8 @@ TYPE_MAP.set('minKey', actual => actual._bsontype === 'MinKey'); TYPE_MAP.set('maxKey', actual => actual._bsontype === 'MaxKey'); TYPE_MAP.set( 'int', - actual => (typeof actual === 'number' && Number.isInteger(actual)) || actual._bsontype === 'Int32' + actual => + (typeof actual === 'number' && Number.isInteger(actual)) || actual?._bsontype === 'Int32' ); TYPE_MAP.set( 'long', @@ -218,6 +219,10 @@ export function resultCheck( resultCheck(objFromActual, value, entities, path, checkExtraKeys); } else if (key === 'createIndexes') { for (const [i, userIndex] of actual.indexes.entries()) { + if (expected?.indexes?.[i]?.key == null) { + // The expectation does not include an assertion for the index key + continue; + } expect(expected).to.have.nested.property(`.indexes[${i}].key`).to.be.a('object'); // @ts-expect-error: Not worth narrowing to a document expect(Object.keys(expected.indexes[i].key)).to.have.lengthOf(1); @@ -360,7 +365,7 @@ export function specialCheck( for (const type of types) { ok ||= TYPE_MAP.get(type)(actual); } - expect(ok, `Expected [${actual}] to be one of [${types}]`).to.be.true; + expect(ok, `Expected ${path.join('.')} [${actual}] to be one of [${types}]`).to.be.true; } else if (isExistsOperator(expected)) { // $$exists const actualExists = actual !== undefined && actual !== null; @@ -773,6 +778,12 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 51d458a185b..0d7fc18970a 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -293,6 +293,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { if (!/ns not found/.test(err.message)) { throw err; } + return false; } }); @@ -303,7 +304,7 @@ operations.set('drop', async ({ entities, operation }) => { operations.set('dropIndexes', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - return collection.dropIndexes(); + return collection.dropIndexes(operation.arguments); }); operations.set('endSession', async ({ entities, operation }) => { @@ -757,11 +758,10 @@ operations.set('runCommand', async ({ entities, operation }: OperationFunctionPa throw new AssertionError('runCommand requires a command'); const { command } = operation.arguments; - if (operation.arguments.timeoutMS != null) throw new AssertionError('timeoutMS not supported'); - 
const options = { readPreference: operation.arguments.readPreference, - session: operation.arguments.session + session: operation.arguments.session, + timeoutMS: operation.arguments.timeoutMS }; return db.command(command, options); diff --git a/test/unit/tools/unified_spec_runner.test.ts b/test/unit/tools/unified_spec_runner.test.ts index a0887be9593..7ebee168590 100644 --- a/test/unit/tools/unified_spec_runner.test.ts +++ b/test/unit/tools/unified_spec_runner.test.ts @@ -100,7 +100,7 @@ describe('Unified Spec Runner', function () { expect(() => resultCheckSpy(actual, expected, entitiesMap, [])).to.throw( AssertionError, - /Expected \[string\] to be one of \[int\]/ + /\[string\] to be one of \[int\]/ ); }); }); From bf95fa497276b7031b1140a2910a188b230cc6f1 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 1 Aug 2024 16:08:39 -0400 Subject: [PATCH 006/136] feat(NODE-6231): Add CSOT behaviour for retryable reads and writes (#4186) --- src/operations/execute_operation.ts | 9 ++++--- src/timeout.ts | 26 ++++++++++++------- ...lient_side_operations_timeout.spec.test.ts | 13 +++++++++- ...lient_side_operations_timeout.unit.test.ts | 10 +++++-- .../node_csot.test.ts | 5 ---- test/tools/unified-spec-runner/match.ts | 2 ++ 6 files changed, 44 insertions(+), 21 deletions(-) diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index efd92f19de3..c9135fa1c32 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -227,12 +227,10 @@ async function tryOperation< session.incrementTransactionNumber(); } - // TODO(NODE-6231): implement infinite retry within CSOT timeout here - const maxTries = willRetry ? 2 : 1; + const maxTries = willRetry ? (timeoutContext.csotEnabled() ? Infinity : 2) : 1; let previousOperationError: MongoError | undefined; let previousServer: ServerDescription | undefined; - // TODO(NODE-6231): implement infinite retry within CSOT timeout here for (let tries = 0; tries < maxTries; tries++) { if (previousOperationError) { if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) { @@ -276,7 +274,6 @@ async function tryOperation< return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; - if ( previousOperationError != null && operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed) @@ -285,6 +282,10 @@ async function tryOperation< } previousServer = server.description; previousOperationError = operationError; + + // Reset timeouts + timeoutContext.serverSelectionTimeout?.clear(); + timeoutContext.connectionCheckoutTimeout?.clear(); } } diff --git a/src/timeout.ts b/src/timeout.ts index cc90b8c2e72..297a484b4ec 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -39,6 +39,7 @@ export class Timeout extends Promise { public ended: number | null = null; public duration: number; public timedOut = false; + public cleared = false; get remainingTime(): number { if (this.timedOut) return 0; @@ -53,7 +54,6 @@ export class Timeout extends Promise { /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; - if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } @@ -86,6 +86,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.cleared = true; } throwIfExpired(): void { @@ 
-213,16 +214,20 @@ export class CSOTTimeoutContext extends TimeoutContext { get serverSelectionTimeout(): Timeout | null { // check for undefined - if (typeof this._serverSelectionTimeout !== 'object') { + if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { + const { remainingTimeMS, serverSelectionTimeoutMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError( + `Timed out in server selection after ${this.timeoutMS}ms` + ); const usingServerSelectionTimeoutMS = - this.serverSelectionTimeoutMS !== 0 && - csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; - + serverSelectionTimeoutMS !== 0 && + csotMin(remainingTimeMS, serverSelectionTimeoutMS) === serverSelectionTimeoutMS; if (usingServerSelectionTimeoutMS) { - this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + this._serverSelectionTimeout = Timeout.expires(serverSelectionTimeoutMS); } else { - if (this.timeoutMS > 0) { - this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + if (remainingTimeMS > 0 && Number.isFinite(remainingTimeMS)) { + this._serverSelectionTimeout = Timeout.expires(remainingTimeMS); } else { this._serverSelectionTimeout = null; } @@ -233,7 +238,10 @@ export class CSOTTimeoutContext extends TimeoutContext { } get connectionCheckoutTimeout(): Timeout | null { - if (typeof this._connectionCheckoutTimeout !== 'object') { + if ( + typeof this._connectionCheckoutTimeout !== 'object' || + this._connectionCheckoutTimeout?.cleared + ) { if (typeof this._serverSelectionTimeout === 'object') { // null or Timeout this._connectionCheckoutTimeout = this._serverSelectionTimeout; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index f73f162204f..e4c9eb3027c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -6,7 +6,9 @@ import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const enabled = [ 'override-collection-timeoutMS', 'override-database-timeoutMS', - 'override-operation-timeoutMS' + 'override-operation-timeoutMS', + 'retryability-legacy-timeouts', + 'retryability-timeoutMS' ]; const cursorOperations = [ @@ -18,6 +20,11 @@ const cursorOperations = [ 'listCollectionNames' ]; +const bulkWriteOperations = [ + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' +]; + describe('CSOT spec tests', function () { const specs = loadSpecTests(join('client-side-operations-timeout')); for (const spec of specs) { @@ -30,6 +37,10 @@ describe('CSOT spec tests', function () { // Cursor operation if (test.operations.find(operation => cursorOperations.includes(operation.name))) test.skipReason = 'TODO(NODE-5684): Not working yet'; + + if (bulkWriteOperations.includes(test.description)) + test.skipReason = + 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } runUnifiedSuite(specs); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 
c4989f58d7f..944d9b96048 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -7,7 +7,7 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; +import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -22,10 +22,16 @@ describe('CSOT spec unit tests', function () { it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); sinon.spy(Timeout, 'expires'); + const timeoutContextSpy = sinon.spy(TimeoutContext, 'create'); await client.db('db').collection('collection').insertOne({ x: 1 }); - expect(Timeout.expires).to.have.been.calledWith(10000); + const createCalls = timeoutContextSpy.getCalls().filter( + // @ts-expect-error accessing concrete field + call => call.args[0].timeoutMS === 10000 + ); + + expect(createCalls).to.have.length.greaterThanOrEqual(1); expect(Timeout.expires).to.not.have.been.calledWith(999999); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 0c97b910836..63e2d97dd90 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,6 +1,5 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; -import * as sinon from 'sinon'; import { type ClientSession, @@ -13,10 +12,6 @@ import { } from '../../mongodb'; describe('CSOT driver tests', () => { - afterEach(() => { - sinon.restore(); - }); - describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index fb1a759bf52..5c4ea000def 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -778,6 +778,8 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their + // errorResponse field if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { From c63d102f13ae13c85663c4b9167bcb9b8692f932 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 12 Aug 2024 16:46:06 -0400 Subject: [PATCH 007/136] feat(NODE-6312): add error transformation for server timeouts (#4192) --- src/cmap/connection.ts | 29 ++++ src/cmap/wire_protocol/responses.ts | 36 +++- .../node_csot.test.ts | 163 +++++++++++++++++- 3 files changed, 225 insertions(+), 3 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index e3f1b0d753d..fbc54c944c4 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -16,6 +16,7 @@ import { } from '../constants'; import { MongoCompatibilityError, + MONGODB_ERROR_CODES, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, @@ -538,6 +539,11 @@ export class Connection extends TypedEventEmitter { } if (document.ok === 0) { + if 
(options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError((object ??= document.toObject(bsonOptions))) + }); + } throw new MongoServerError((object ??= document.toObject(bsonOptions))); } @@ -607,6 +613,29 @@ export class Connection extends TypedEventEmitter { ): Promise { this.throwIfAborted(); for await (const document of this.sendCommand(ns, command, options, responseType)) { + if (options.timeoutContext?.csotEnabled()) { + if (MongoDBResponse.is(document)) { + // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT + if (document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document.toObject()) + }); + } + } else { + if ( + (Array.isArray(document?.writeErrors) && + document.writeErrors.some( + error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + )) || + document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + ) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document) + }); + } + } + } + return document; } throw new MongoUnexpectedServerResponseError('Unable to get response from server'); diff --git a/src/cmap/wire_protocol/responses.ts b/src/cmap/wire_protocol/responses.ts index e69cf84cfc2..e0c48a2ea74 100644 --- a/src/cmap/wire_protocol/responses.ts +++ b/src/cmap/wire_protocol/responses.ts @@ -11,7 +11,7 @@ import { pluckBSONSerializeOptions, type Timestamp } from '../../bson'; -import { MongoUnexpectedServerResponseError } from '../../error'; +import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error'; import { type ClusterTime } from '../../sdam/common'; import { decorateDecryptionResult, ns } from '../../utils'; import { @@ -111,6 +111,40 @@ export class MongoDBResponse extends OnDemandDocument { // {ok:1} static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0])); + /** + * Returns true iff: + * - ok is 0 and the top-level code === 50 + * - ok is 1 and the writeErrors array contains a code === 50 + * - ok is 1 and the writeConcern object contains a code === 50 + */ + get isMaxTimeExpiredError() { + // {ok: 0, code: 50 ... } + const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isTopLevel) return true; + + if (this.ok === 0) return false; + + // {ok: 1, writeConcernError: {code: 50 ... }} + const isWriteConcern = + this.get('writeConcernError', BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isWriteConcern) return true; + + const writeErrors = this.get('writeErrors', BSONType.array); + if (writeErrors?.size()) { + for (let i = 0; i < writeErrors.size(); i++) { + const isWriteError = + writeErrors.get(i, BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + + // {ok: 1, writeErrors: [{code: 50 ... }]} + if (isWriteError) return true; + } + } + + return false; + } + /** * Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the * contents of the document. 
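For reference, the three maxTimeMS-expiry reply shapes that the new isMaxTimeExpiredError getter recognizes, and that the error transformation above wraps in a MongoOperationTimeoutError, look roughly like the following. These documents are illustrative stand-ins built from the description in the getter's doc comment, not captured server output; every field other than the code 50 / MaxTimeMSExpired marker is a placeholder.

// Top-level failure: { ok: 0, code: 50 }
const topLevelMaxTimeExpired = { ok: 0, code: 50, codeName: 'MaxTimeMSExpired' };

// Write concern failure: { ok: 1, writeConcernError: { code: 50 } }
const writeConcernMaxTimeExpired = {
  ok: 1,
  writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' }
};

// Write error failure: { ok: 1, writeErrors: [{ code: 50 }] }
const writeErrorMaxTimeExpired = {
  ok: 1,
  writeErrors: [{ index: 0, code: 50, codeName: 'MaxTimeMSExpired' }]
};

// Each of these is expected to surface to the caller as a MongoOperationTimeoutError
// whose `cause` is the MongoServerError built from the reply.
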
diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 63e2d97dd90..d7d4a4ede5a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,17 +1,23 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; +import * as semver from 'semver'; +import * as sinon from 'sinon'; import { + BSON, type ClientSession, type Collection, + Connection, type Db, type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, - MongoOperationTimeoutError + MongoOperationTimeoutError, + MongoServerError } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', () => { +describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -161,4 +167,157 @@ describe('CSOT driver tests', () => { }); }); }); + + describe('server-side maxTimeMS errors are transformed', () => { + let client: MongoClient; + let commandsSucceeded; + let commandsFailed; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); + commandsSucceeded = []; + commandsFailed = []; + client.on('commandSucceeded', event => { + if (event.commandName === 'configureFailPoint') return; + commandsSucceeded.push(event); + }); + client.on('commandFailed', event => commandsFailed.push(event)); + }); + + afterEach(async function () { + await client + .db() + .collection('a') + .drop() + .catch(() => null); + await client.close(); + commandsSucceeded = undefined; + commandsFailed = undefined; + }); + + describe('when a maxTimeExpired error is returned at the top-level', () => { + // {ok: 0, code: 50, codeName: "MaxTimeMSExpired", errmsg: "operation time limit exceeded"} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['ping'], + errorCode: 50 + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command failed', async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { + // The server should always return one maxTimeExpiredError at the front of the writeErrors array + // But for the sake of defensive programming we will find any maxTime error in the array. 
+ + beforeEach(async () => { + const writeErrorsReply = BSON.serialize({ + ok: 1, + writeErrors: [ + { code: 2, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 3, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 4, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 50, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' } + ] + }); + const commandSpy = sinon.spy(Connection.prototype, 'command'); + const readManyStub = sinon + // @ts-expect-error: readMany is private + .stub(Connection.prototype, 'readMany') + .callsFake(async function* (...args) { + const realIterator = readManyStub.wrappedMethod.call(this, ...args); + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + }); + }); + + afterEach(() => sinon.restore()); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { + // {ok: 1, writeConcernError: {code: 50, codeName: "MaxTimeMSExpired"}} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + writeConcernError: { code: 50, errmsg: 'times up buster', errorLabels: [] } + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + }); + }); + }); }); From 1eab23d2ae74c401b6dfb2408931fdc219539c57 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 9 Sep 2024 11:11:11 -0400 Subject: [PATCH 008/136] feat(NODE-6313): add CSOT support to sessions and transactions (#4199) --- package-lock.json | 82 +++--- package.json | 2 +- src/cmap/connection.ts | 7 + src/cmap/wire_protocol/on_data.ts | 15 +- src/collection.ts | 12 +- src/db.ts | 22 +- src/error.ts | 3 + src/operations/execute_operation.ts | 8 +- src/sessions.ts | 255 ++++++++++++------ src/timeout.ts | 49 +++- src/transactions.ts | 7 +- src/utils.ts | 13 +- 
...ient_side_operations_timeout.prose.test.ts | 167 +++++++++++- ...lient_side_operations_timeout.spec.test.ts | 18 +- .../node_csot.test.ts | 150 +++++++++++ .../sessions-inherit-timeoutMS.json | 28 +- .../sessions-inherit-timeoutMS.yml | 19 +- ...sessions-override-operation-timeoutMS.json | 32 ++- .../sessions-override-operation-timeoutMS.yml | 23 +- .../sessions-override-timeoutMS.json | 28 +- .../sessions-override-timeoutMS.yml | 19 +- test/tools/unified-spec-runner/entities.ts | 4 + test/tools/unified-spec-runner/match.ts | 19 +- test/tools/unified-spec-runner/operations.ts | 27 +- 24 files changed, 776 insertions(+), 233 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2b07cd361d5..1d9cebf509b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6415,10 +6415,46 @@ "node": ">=10" } }, - "node_modules/mongodb": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.8.0.tgz", - "integrity": "sha512-HGQ9NWDle5WvwMnrvUxsFYPd3JEbqD3RgABHBQRuoCEND0qzhsd0iH5ypHsf1eJ+sXmvmyKpP+FLOKY8Il7jMw==", + "node_modules/mongodb-client-encryption": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", + "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "bindings": "^1.5.0", + "node-addon-api": "^4.3.0", + "prebuild-install": "^7.1.2" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-connection-string-url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", + "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", + "dependencies": { + "@types/whatwg-url": "^11.0.2", + "whatwg-url": "^13.0.0" + } + }, + "node_modules/mongodb-legacy": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", + "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "dev": true, + "dependencies": { + "mongodb": "^6.0.0" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-legacy/node_modules/mongodb": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.7.0.tgz", + "integrity": "sha512-TMKyHdtMcO0fYBNORiYdmM25ijsHs+Njs963r4Tro4OQZzqYigAzYQouwWRg4OIaiLRUEGUh/1UAcH5lxdSLIA==", "dev": true, "dependencies": { "@mongodb-js/saslprep": "^1.1.5", @@ -6461,42 +6497,6 @@ } } }, - "node_modules/mongodb-client-encryption": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", - "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "node-addon-api": "^4.3.0", - "prebuild-install": "^7.1.2" - }, - "engines": { - "node": ">=16.20.1" - } - }, - "node_modules/mongodb-connection-string-url": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", - "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", - "dependencies": { - "@types/whatwg-url": "^11.0.2", - "whatwg-url": "^13.0.0" - } - }, - "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", - "dev": true, - "dependencies": { - "mongodb": "^6.0.0" - }, - "engines": { - "node": ">=16.20.1" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", diff --git a/package.json b/package.json index 479356905dc..2de0e1811f0 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index fbc54c944c4..91fe37662b7 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -738,6 +738,13 @@ export class Connection extends TypedEventEmitter { return; } } + } catch (readError) { + if (TimeoutError.is(readError)) { + throw new MongoOperationTimeoutError( + `Timed out during socket read (${readError.duration}ms)` + ); + } + throw readError; } finally { this.dataEvents = null; this.throwIfAborted(); diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index a32c6b1b484..23fd88e2828 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,7 +1,6 @@ import { type EventEmitter } from 'events'; -import { MongoOperationTimeoutError } from '../../error'; -import { type TimeoutContext, TimeoutError } from '../../timeout'; +import { type TimeoutContext } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -91,8 +90,11 @@ export function onData( // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + + const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; + timeoutForSocketRead?.throwIfExpired(); // eslint-disable-next-line github/no-then - timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); + timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -104,12 +106,9 @@ export function onData( function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - const timeoutError = TimeoutError.is(err) - ? new MongoOperationTimeoutError('Timed out during socket read') - : undefined; - if (promise != null) promise.reject(timeoutError ?? err); - else error = timeoutError ?? err; + if (promise != null) promise.reject(err); + else error = err; void closeHandler(); } diff --git a/src/collection.ts b/src/collection.ts index dbd91371cce..f3a206b0c7b 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -470,10 +470,14 @@ export class Collection { // Intentionally, we do not inherit options from parent for this operation. 
return await executeOperation( this.client, - new RenameOperation(this as TODO_NODE_3286, newName, { - ...options, - readPreference: ReadPreference.PRIMARY - }) as TODO_NODE_3286 + new RenameOperation( + this as TODO_NODE_3286, + newName, + resolveOptions(undefined, { + ...options, + readPreference: ReadPreference.PRIMARY + }) + ) as TODO_NODE_3286 ); } diff --git a/src/db.ts b/src/db.ts index 07a0c928cca..cbb0eac13f8 100644 --- a/src/db.ts +++ b/src/db.ts @@ -275,12 +275,16 @@ export class Db { // Intentionally, we do not inherit options from parent for this operation. return await executeOperation( this.client, - new RunCommandOperation(this, command, { - ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS ?? this.timeoutMS, - session: options?.session, - readPreference: options?.readPreference - }) + new RunCommandOperation( + this, + command, + resolveOptions(undefined, { + ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS ?? this.timeoutMS, + session: options?.session, + readPreference: options?.readPreference + }) + ) ); } @@ -385,7 +389,11 @@ export class Db { new RenameOperation( this.collection(fromCollection) as TODO_NODE_3286, toCollection, - { ...options, new_collection: true, readPreference: ReadPreference.primary } + resolveOptions(undefined, { + ...options, + new_collection: true, + readPreference: ReadPreference.primary + }) ) as TODO_NODE_3286 ); } diff --git a/src/error.ts b/src/error.ts index 0620d3069f3..3867553370b 100644 --- a/src/error.ts +++ b/src/error.ts @@ -124,6 +124,9 @@ function isAggregateError(e: unknown): e is Error & { errors: Error[] } { * mongodb-client-encryption has a dependency on this error, it uses the constructor with a string argument */ export class MongoError extends Error { + get [Symbol.toStringTag]() { + return this.name; + } /** @internal */ [kErrorLabels]: Set; /** diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index c9135fa1c32..f9d9f9b63b4 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -58,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -81,11 +81,6 @@ export async function executeOperation< } else if (session.client !== client) { throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient'); } - if (session.explicit && session?.timeoutMS != null && operation.options.timeoutMS != null) { - throw new MongoInvalidArgumentError( - 'Do not specify timeoutMS on operation if already specified on an explicit session' - ); - } const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -107,6 +102,7 @@ export async function executeOperation< } timeoutContext ??= TimeoutContext.create({ + session, serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, timeoutMS: operation.options.timeoutMS diff --git a/src/sessions.ts b/src/sessions.ts index bad966ed71c..bbd1785275f 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -29,6 +29,7 @@ import { ReadConcernLevel } from './read_concern'; import { ReadPreference } from './read_preference'; import { type AsyncDisposable, configureResourceManagement } from './resource_management'; import { _advanceClusterTime, type ClusterTime, TopologyType } from './sdam/common'; +import { TimeoutContext } from './timeout'; import { isTransactionCommand, Transaction, @@ -58,8 +59,11 @@ export interface ClientSessionOptions { snapshot?: boolean; /** The default TransactionOptions to use for transactions started on this session. */ defaultTransactionOptions?: TransactionOptions; - /** @internal - * The value of timeoutMS used for CSOT. Used to override client timeoutMS */ + /** + * @public + * An overriding timeoutMS value to use for a client-side timeout. + * If not provided the session uses the timeoutMS specified on the MongoClient. + */ defaultTimeoutMS?: number; /** @internal */ @@ -98,6 +102,9 @@ export interface EndSessionOptions { error?: AnyError; force?: boolean; forceClear?: boolean; + + /** @internal */ + timeoutMS?: number; } /** @@ -115,7 +122,7 @@ export class ClientSession /** @internal */ sessionPool: ServerSessionPool; hasEnded: boolean; - clientOptions?: MongoOptions; + clientOptions: MongoOptions; supports: { causalConsistency: boolean }; clusterTime?: ClusterTime; operationTime?: Timestamp; @@ -137,6 +144,9 @@ export class ClientSession /** @internal */ timeoutMS?: number; + /** @internal */ + public timeoutContext: TimeoutContext | null = null; + /** * Create a client session. * @internal @@ -149,7 +159,7 @@ export class ClientSession client: MongoClient, sessionPool: ServerSessionPool, options: ClientSessionOptions, - clientOptions?: MongoOptions + clientOptions: MongoOptions ) { super(); @@ -269,8 +279,13 @@ export class ClientSession async endSession(options?: EndSessionOptions): Promise { try { if (this.inTransaction()) { - await this.abortTransaction(); + await this.abortTransaction({ ...options, throwTimeout: true }); } + } catch (error) { + // spec indicates that we should ignore all errors for `endSessions` + if (error.name === 'MongoOperationTimeoutError') throw error; + squashError(error); + } finally { if (!this.hasEnded) { const serverSession = this[kServerSession]; if (serverSession != null) { @@ -286,10 +301,6 @@ export class ClientSession this.hasEnded = true; this.emit('ended', this); } - } catch (error) { - // spec indicates that we should ignore all errors for `endSessions` - squashError(error); - } finally { maybeClearPinnedConnection(this, { force: true, ...options }); } } @@ -441,8 +452,10 @@ export class ClientSession /** * Commits the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. 
*/ - async commitTransaction(): Promise { + async commitTransaction(options?: { timeoutMS?: number }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -492,8 +505,25 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + this.timeoutContext ?? + (typeof timeoutMS === 'number' + ? TimeoutContext.create({ + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS, + timeoutMS + }) + : null); + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (firstCommitError) { if (firstCommitError instanceof MongoError && isRetryableWriteError(firstCommitError)) { @@ -503,7 +533,7 @@ export class ClientSession this.unpin({ force: true }); try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (retryCommitError) { // If the retry failed, we process that error instead of the original @@ -535,8 +565,13 @@ export class ClientSession /** * Aborts the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. */ - async abortTransaction(): Promise { + async abortTransaction(options?: { timeoutMS?: number }): Promise; + /** @internal */ + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise; + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -581,18 +616,45 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : this.timeoutContext?.csotEnabled() + ? this.timeoutContext.timeoutMS // refresh timeoutMS for abort operation + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + timeoutMS != null + ? 
TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); this.unpin(); return; } catch (firstAbortError) { this.unpin(); + if (firstAbortError.name === 'MongoRuntimeError') throw firstAbortError; + if (options?.throwTimeout && firstAbortError.name === 'MongoOperationTimeoutError') { + throw firstAbortError; + } + if (firstAbortError instanceof MongoError && isRetryableWriteError(firstAbortError)) { try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; - } catch { + } catch (secondAbortError) { + if (secondAbortError.name === 'MongoRuntimeError') throw secondAbortError; + if (options?.throwTimeout && secondAbortError.name === 'MongoOperationTimeoutError') { + throw secondAbortError; + } // we do not retry the retry } } @@ -647,96 +709,119 @@ export class ClientSession */ async withTransaction( fn: WithTransactionCallback, - options?: TransactionOptions + options?: TransactionOptions & { + /** + * Configures a timeoutMS expiry for the entire withTransactionCallback. + * + * @remarks + * - The remaining timeout will not be applied to callback operations that do not use the ClientSession. + * - Overriding timeoutMS for operations executed using the explicit session inside the provided callback will result in a client-side error. + */ + timeoutMS?: number; + } ): Promise { const MAX_TIMEOUT = 120000; - const startTime = now(); - - let committed = false; - let result: any; - while (!committed) { - this.startTransaction(options); // may throw on error + const timeoutMS = options?.timeoutMS ?? this.timeoutMS ?? null; + this.timeoutContext = + timeoutMS != null + ? TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; - try { - const promise = fn(this); - if (!isPromiseLike(promise)) { - throw new MongoInvalidArgumentError( - 'Function provided to `withTransaction` must return a Promise' - ); - } + const startTime = this.timeoutContext?.csotEnabled() ? 
this.timeoutContext.start : now(); - result = await promise; + let committed = false; + let result: any; - if ( - this.transaction.state === TxnState.NO_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_COMMITTED || - this.transaction.state === TxnState.TRANSACTION_ABORTED - ) { - // Assume callback intentionally ended the transaction - return result; - } - } catch (fnError) { - if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { - await this.abortTransaction(); - throw fnError; - } + try { + while (!committed) { + this.startTransaction(options); // may throw on error - if ( - this.transaction.state === TxnState.STARTING_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS - ) { - await this.abortTransaction(); - } + try { + const promise = fn(this); + if (!isPromiseLike(promise)) { + throw new MongoInvalidArgumentError( + 'Function provided to `withTransaction` must return a Promise' + ); + } - if ( - fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT - ) { - continue; - } + result = await promise; - throw fnError; - } + if ( + this.transaction.state === TxnState.NO_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_COMMITTED || + this.transaction.state === TxnState.TRANSACTION_ABORTED + ) { + // Assume callback intentionally ended the transaction + return result; + } + } catch (fnError) { + if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { + await this.abortTransaction(); + throw fnError; + } - while (!committed) { - try { - /* - * We will rely on ClientSession.commitTransaction() to - * apply a majority write concern if commitTransaction is - * being retried (see: DRIVERS-601) - */ - await this.commitTransaction(); - committed = true; - } catch (commitError) { - /* - * Note: a maxTimeMS error will have the MaxTimeMSExpired - * code (50) and can be reported as a top-level error or - * inside writeConcernError, ex. - * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } - * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } - */ if ( - !isMaxTimeMSExpiredError(commitError) && - commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && - now() - startTime < MAX_TIMEOUT + this.transaction.state === TxnState.STARTING_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS ) { - continue; + await this.abortTransaction(); } if ( - commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT + fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) ) { - break; + continue; } - throw commitError; + throw fnError; + } + + while (!committed) { + try { + /* + * We will rely on ClientSession.commitTransaction() to + * apply a majority write concern if commitTransaction is + * being retried (see: DRIVERS-601) + */ + await this.commitTransaction(); + committed = true; + } catch (commitError) { + /* + * Note: a maxTimeMS error will have the MaxTimeMSExpired + * code (50) and can be reported as a top-level error or + * inside writeConcernError, ex. 
+ * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } + * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } + */ + if ( + !isMaxTimeMSExpiredError(commitError) && + commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + continue; + } + + if ( + commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + break; + } + + throw commitError; + } } } + return result; + } finally { + this.timeoutContext = null; } - - return result; } } diff --git a/src/timeout.ts b/src/timeout.ts index 297a484b4ec..f057bdb90b4 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,16 +1,19 @@ import { clearTimeout, setTimeout } from 'timers'; import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; +import { type ClientSession } from './sessions'; import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { + duration: number; override get name(): 'TimeoutError' { return 'TimeoutError'; } - constructor(message: string, options?: { cause?: Error }) { + constructor(message: string, options: { cause?: Error; duration: number }) { super(message, options); + this.duration = options.duration; } static is(error: unknown): error is TimeoutError { @@ -52,12 +55,19 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = true) { - let reject!: Reject; + private constructor( + executor: Executor = () => null, + options?: { duration: number; unref?: true; rejection?: Error } + ) { + const duration = options?.duration ?? 
0; + const unref = !!options?.unref; + const rejection = options?.rejection; + if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } + let reject!: Reject; super((_, promiseReject) => { reject = promiseReject; @@ -67,16 +77,20 @@ export class Timeout extends Promise { this.duration = duration; this.start = Math.trunc(performance.now()); - if (this.duration > 0) { + if (rejection == null && this.duration > 0) { this.id = setTimeout(() => { this.ended = Math.trunc(performance.now()); this.timedOut = true; - reject(new TimeoutError(`Expired after ${duration}ms`)); + reject(new TimeoutError(`Expired after ${duration}ms`, { duration })); }, this.duration); if (typeof this.id.unref === 'function' && unref) { // Ensure we do not keep the Node.js event loop running this.id.unref(); } + } else if (rejection != null) { + this.ended = Math.trunc(performance.now()); + this.timedOut = true; + reject(rejection); } } @@ -90,11 +104,11 @@ export class Timeout extends Promise { } throwIfExpired(): void { - if (this.timedOut) throw new TimeoutError('Timed out'); + if (this.timedOut) throw new TimeoutError('Timed out', { duration: this.duration }); } - public static expires(durationMS: number, unref?: boolean): Timeout { - return new Timeout(undefined, durationMS, unref); + public static expires(duration: number, unref?: true): Timeout { + return new Timeout(undefined, { duration, unref }); } static is(timeout: unknown): timeout is Timeout { @@ -107,10 +121,16 @@ export class Timeout extends Promise { typeof timeout.then === 'function' ); } + + static override reject(rejection?: Error): Timeout { + return new Timeout(undefined, { duration: 0, unref: true, rejection }); + } } /** @internal */ -export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; +export type TimeoutContextOptions = (LegacyTimeoutContextOptions | CSOTTimeoutContextOptions) & { + session?: ClientSession; +}; /** @internal */ export type LegacyTimeoutContextOptions = { @@ -151,6 +171,7 @@ function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions /** @internal */ export abstract class TimeoutContext { static create(options: TimeoutContextOptions): TimeoutContext { + if (options.session?.timeoutContext != null) return options.session?.timeoutContext; if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); else throw new MongoRuntimeError('Unrecognized options'); @@ -183,7 +204,7 @@ export class CSOTTimeoutContext extends TimeoutContext { private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; public minRoundTripTime = 0; - private start: number; + public start: number; constructor(options: CSOTTimeoutContextOptions) { super(); @@ -217,8 +238,8 @@ export class CSOTTimeoutContext extends TimeoutContext { if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { const { remainingTimeMS, serverSelectionTimeoutMS } = this; if (remainingTimeMS <= 0) - throw new MongoOperationTimeoutError( - `Timed out in server selection after ${this.timeoutMS}ms` + return Timeout.reject( + new MongoOperationTimeoutError(`Timed out in server selection after ${this.timeoutMS}ms`) ); const usingServerSelectionTimeoutMS = serverSelectionTimeoutMS !== 0 && @@ -258,14 +279,14 @@ export class CSOTTimeoutContext extends TimeoutContext { const { remainingTimeMS 
} = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket write'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket write')); } get timeoutForSocketRead(): Timeout | null { const { remainingTimeMS } = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket read'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } } diff --git a/src/transactions.ts b/src/transactions.ts index 53dcb842084..db251c82c16 100644 --- a/src/transactions.ts +++ b/src/transactions.ts @@ -60,7 +60,7 @@ const COMMITTED_STATES: Set = new Set([ * Configuration options for a transaction. * @public */ -export interface TransactionOptions extends CommandOperationOptions { +export interface TransactionOptions extends Omit { // TODO(NODE-3344): These options use the proper class forms of these settings, it should accept the basic enum values too /** A default read concern for commands in this transaction */ readConcern?: ReadConcernLike; @@ -68,7 +68,10 @@ export interface TransactionOptions extends CommandOperationOptions { writeConcern?: WriteConcern; /** A default read preference for commands in this transaction */ readPreference?: ReadPreferenceLike; - /** Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds */ + /** + * Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds + * @deprecated This option is deprecated in favor of `timeoutMS` or `defaultTimeoutMS`. + */ maxCommitTimeMS?: number; } diff --git a/src/utils.ts b/src/utils.ts index fa7a3f6509c..11302b759bb 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -506,6 +506,10 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. + * + * @param parent - An optional owning class of the operation being run. ex. Db/Collection/MongoClient. + * @param options - The options passed to the operation method. + * * @internal */ export function resolveOptions( @@ -533,9 +537,14 @@ export function resolveOptions( result.readPreference = readPreference; } - const timeoutMS = options?.timeoutMS; + const isConvenientTransaction = session?.explicit && session?.timeoutContext != null; + if (isConvenientTransaction && options?.timeoutMS != null) { + throw new MongoInvalidArgumentError( + 'An operation cannot be given a timeoutMS setting when inside a withTransaction call that has a timeoutMS setting' + ); + } - result.timeoutMS = timeoutMS ?? parent?.timeoutMS; + result.timeoutMS = options?.timeoutMS ?? 
parent?.timeoutMS; return result; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 729bed42199..406aa53ed6a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,6 +1,7 @@ /* Specification prose tests */ import { expect } from 'chai'; +import * as semver from 'semver'; import * as sinon from 'sinon'; import { @@ -9,6 +10,7 @@ import { MongoServerSelectionError, now } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -595,7 +597,10 @@ describe('CSOT spec prose tests', function () { 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context.skip('9. endSession', () => { + describe('9. endSession', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4', topology: ['replicaset', 'sharded'] } + }; /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -625,12 +630,92 @@ describe('CSOT spec prose tests', function () { * 1. Using `session`, execute `session.end_session` * - Expect this to fail with a timeout error after no more than 15ms. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + const internalClient = this.configuration.newClient(); + // End in-progress transactions otherwise "drop" will hang + await internalClient.db('admin').command({ killAllSessions: [] }); + await internalClient + .db('endSession_db') + .collection('endSession_coll') + .drop() + .catch(() => null); + await internalClient.db('endSession_db').createCollection('endSession_coll'); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + const internalClient = this.configuration.newClient(); + await internalClient.db('admin').command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client?.close(); + }); + + describe('when timeoutMS is provided to the client', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient({ timeoutMS: 150, monitorCommands: true }); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when defaultTimeoutMS is provided to startSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = 
client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession({ defaultTimeoutMS: 150 }); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when timeoutMS is provided to endSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession({ timeoutMS: 150 }).catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); }); - context.skip('10. Convenient Transactions', () => { + describe('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=4.4' } + }; - context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { + describe('when an operation fails inside withTransaction callback', () => { /** * 1. Using `internalClient`, drop the `db.coll` collection. * 1. Using `internalClient`, set the following fail point: @@ -641,7 +726,7 @@ describe('CSOT spec prose tests', function () { * data: { * failCommands: ["insert", "abortTransaction"], * blockConnection: true, - * blockTimeMS: 15 + * blockTimeMS: 200 * } * } * ``` @@ -658,6 +743,80 @@ describe('CSOT spec prose tests', function () { * 1. `command_started` and `command_failed` events for an `insert` command. * 1. `command_started` and `command_failed` events for an `abortTransaction` command. 
*/ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it('timeoutMS is refreshed for abortTransaction', metadata, async function () { + if ( + this.configuration.topologyType === 'ReplicaSetWithPrimary' && + semver.satisfies(this.configuration.version, '<=4.4') + ) { + this.skipReason = '4.4 replicaset fail point does not blockConnection for requested time'; + this.skip(); + } + + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 150, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + const withTransactionError = await session + .withTransaction(async session => { + await coll.insertOne({ x: 1 }, { session }); + }) + .catch(error => error); + + try { + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal(['insert', 'abortTransaction']); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + }); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index e4c9eb3027c..a178cecc5d2 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -1,4 +1,5 @@ import { join } from 'path'; +import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; @@ -8,7 +9,10 @@ const enabled = [ 'override-database-timeoutMS', 'override-operation-timeoutMS', 'retryability-legacy-timeouts', - 'retryability-timeoutMS' + 'retryability-timeoutMS', + 'sessions-override-operation-timeoutMS', + 'sessions-override-timeoutMS', + 'sessions-inherit-timeoutMS' ]; const cursorOperations = [ @@ -43,5 +47,15 @@ describe('CSOT spec tests', function () { 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } - runUnifiedSuite(specs); + runUnifiedSuite(specs, (test, configuration) => { + const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + sessionCSOTTests.includes(test.description) && + configuration.topologyType === 
'ReplicaSetWithPrimary' && + semver.satisfies(configuration.version, '<=4.4') + ) { + return '4.4 replicaset fail point does not blockConnection for requested time'; + } + return false; + }); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index d7d4a4ede5a..cc767c1d80a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -12,6 +12,7 @@ import { type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, + MongoInvalidArgumentError, MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; @@ -320,4 +321,153 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { }); }); }); + + describe('when using an explicit session', () => { + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset'], mongodb: '>=4.4' } + }; + + describe('created for a withTransaction callback', () => { + describe('passing a timeoutMS and a session with a timeoutContext', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('throws a validation error from the operation', metadata, async () => { + // Drivers MUST raise a validation error if an explicit session with a timeout is used and + // the timeoutMS option is set at the operation level for operations executed as part of a withTransaction callback. + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll + .insertOne({ x: 1 }, { session, timeoutMS: 1234 }) + .catch(error => error); + throw insertError; + }) + .catch(error => error); + + expect(insertError).to.be.instanceOf(MongoInvalidArgumentError); + expect(withTransactionError).to.be.instanceOf(MongoInvalidArgumentError); + }); + }); + }); + + describe('created manually', () => { + describe('passing a timeoutMS and a session with an inherited timeoutMS', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('does not throw a validation error', metadata, async () => { + const coll = client.db('db').collection('coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session, timeoutMS: 1234 }); + await session.abortTransaction(); // this uses the inherited timeoutMS, not the insert + }); + }); + }); + }); + + describe('Convenient Transactions', () => { + /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. 
*/ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=5.0' } + }; + + describe('when an operation fails inside withTransaction callback', () => { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 600 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it( + 'timeoutMS is refreshed for abortTransaction and the timeout error is thrown from the operation', + metadata, + async function () { + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 500, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll.insertOne({ x: 1 }, { session }).catch(error => error); + throw insertError; + }) + .catch(error => error); + + try { + expect(insertError).to.be.instanceOf(MongoOperationTimeoutError); + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal([ + 'insert', + 'abortTransaction' + ]); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + } + ); + }); + }); }); diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json index abbc3217327..13ea91c7948 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } 
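
The spec change above (and its YAML counterpart just below) raises timeoutMS to 500 and blockTimeMS to 600 so that the blocked insert inside withTransaction times out client-side, and it now expects commandStarted/commandFailed events for abortTransaction as well: the abort is still attempted with a refreshed timeout budget instead of being skipped once the original budget is exhausted. The following TypeScript sketch is only an illustration of that user-facing behavior, not part of the patch or of the spec runner. The 'db'/'coll' names and the assumption that a failCommand failpoint is blocking insert and abortTransaction for ~600ms (as configured in the spec data above) are placeholders, and the public MongoOperationTimeoutError export relies on the earlier changes in this patch series.

// Illustrative sketch only. Assumes: a replica set on MongoDB >= 4.4, and a
// failCommand failpoint already configured to block `insert` and
// `abortTransaction` for ~600ms, mirroring the spec file above.
import { MongoClient, MongoOperationTimeoutError } from 'mongodb';

async function demo(uri: string): Promise<void> {
  // timeoutMS set on the client is inherited by the session used below.
  const client = new MongoClient(uri, { timeoutMS: 500, monitorCommands: true });
  const started: string[] = [];
  const failed: string[] = [];
  client.on('commandStarted', e => started.push(e.commandName));
  client.on('commandFailed', e => failed.push(e.commandName));

  const coll = client.db('db').collection('coll');
  const session = client.startSession();
  try {
    const err = await session
      .withTransaction(async session => {
        // The blocked insert exceeds the 500ms budget and times out client-side.
        await coll.insertOne({ x: 1 }, { session });
      })
      .catch(error => error);

    // withTransaction surfaces the timeout, but abortTransaction is still sent
    // with a refreshed budget, so both commands start and both fail:
    // started = ['insert', 'abortTransaction'], failed = ['insert', 'abortTransaction'].
    console.log(err instanceof MongoOperationTimeoutError, started, failed);
  } finally {
    await session.endSession();
    await client.close();
  }
}

Refreshing the budget for abortTransaction is the design choice these expectations encode: if the abort inherited the already-expired timeout it would never be sent, leaving the server-side transaction open until it expires on its own.
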
diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml index 184ef7eb9e7..c79384e5f0b 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -52,7 +52,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json index 0254b184a14..441c698328c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml index 8a80a65720a..bee91dc4cb8 100644 --- 
a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml @@ -50,7 +50,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -61,7 +61,7 @@ tests: - name: commitTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectError: isTimeoutError: true expectEvents: @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -106,7 +106,7 @@ tests: - name: abortTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectEvents: - client: *client events: @@ -138,11 +138,11 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 callback: - name: insertOne object: *collection @@ -156,9 +156,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -169,3 +166,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json index c46ae4dd506..d90152e909c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml index 61aaab4d97e..73aaf9ff2a7 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml @@ -29,7 +29,7 @@ createEntities: id: &session session client: *client sessionOptions: - defaultTimeoutMS: 50 + defaultTimeoutMS: 500 initialData: - collectionName: *collectionName @@ -52,7 +52,7 @@ 
tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 9f4e20a828e..7f90e275dc8 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -619,6 +619,10 @@ export class EntitiesMap extends Map { const options = Object.create(null); + if (entity.session.sessionOptions?.defaultTimeoutMS != null) { + options.defaultTimeoutMS = entity.session.sessionOptions?.defaultTimeoutMS; + } + if (entity.session.sessionOptions?.causalConsistency) { options.causalConsistency = entity.session.sessionOptions?.causalConsistency; } diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 5c4ea000def..8cdcc765fc8 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -501,6 +501,13 @@ function compareCommandFailedEvents( } } +function expectInstanceOf any>( + instance: any, + ctor: T +): asserts instance is InstanceType { + expect(instance).to.be.instanceOf(ctor); +} + function compareEvents( actual: CommandEvent[] | CmapEvent[] | SdamEvent[], expected: (ExpectedCommandEvent & ExpectedCmapEvent & ExpectedSdamEvent)[], @@ -515,9 +522,7 @@ function compareEvents( if (expectedEvent.commandStartedEvent) { const path = `${rootPrefix}.commandStartedEvent`; - if (!(actualEvent instanceof CommandStartedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandStartedEvent`); - } + expectInstanceOf(actualEvent, CommandStartedEvent); compareCommandStartedEvents(actualEvent, expectedEvent.commandStartedEvent, entities, path); if (expectedEvent.commandStartedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); @@ -526,9 +531,7 @@ function compareEvents( } } else if (expectedEvent.commandSucceededEvent) { const path = `${rootPrefix}.commandSucceededEvent`; - if (!(actualEvent instanceof CommandSucceededEvent)) { - expect.fail(`expected ${path} to be instanceof CommandSucceededEvent`); - } + expectInstanceOf(actualEvent, CommandSucceededEvent); compareCommandSucceededEvents( actualEvent, expectedEvent.commandSucceededEvent, @@ -542,9 +545,7 @@ function compareEvents( } } else if (expectedEvent.commandFailedEvent) { const path = `${rootPrefix}.commandFailedEvent`; - if 
(!(actualEvent instanceof CommandFailedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandFailedEvent`); - } + expectInstanceOf(actualEvent, CommandFailedEvent); compareCommandFailedEvents(actualEvent, expectedEvent.commandFailedEvent, entities, path); if (expectedEvent.commandFailedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 0d7fc18970a..d43f541aae1 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -19,6 +19,7 @@ import { ServerType, type TopologyDescription, type TopologyType, + type TransactionOptions, WriteConcern } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -49,11 +50,6 @@ operations.set('createEntities', async ({ entities, operation, testConfig }) => await EntitiesMap.createEntities(testConfig, null, operation.arguments.entities!, entities); }); -operations.set('abortTransaction', async ({ entities, operation }) => { - const session = entities.getEntity('session', operation.object); - return session.abortTransaction(); -}); - operations.set('aggregate', async ({ entities, operation }) => { const dbOrCollection = entities.get(operation.object) as Db | Collection; if (!(dbOrCollection instanceof Db || dbOrCollection instanceof Collection)) { @@ -231,7 +227,12 @@ operations.set('close', async ({ entities, operation }) => { operations.set('commitTransaction', async ({ entities, operation }) => { const session = entities.getEntity('session', operation.object); - return session.commitTransaction(); + return await session.commitTransaction({ timeoutMS: operation.arguments?.timeoutMS }); +}); + +operations.set('abortTransaction', async ({ entities, operation }) => { + const session = entities.getEntity('session', operation.object); + return await session.abortTransaction({ timeoutMS: operation.arguments?.timeoutMS }); }); operations.set('createChangeStream', async ({ entities, operation }) => { @@ -361,7 +362,7 @@ operations.set('insertOne', async ({ entities, operation }) => { // Looping exposes the fact that we can generate _ids for inserted // documents and we don't want the original operation to get modified // and use the same _id for each insert. 
- return collection.insertOne({ ...document }, opts); + return await collection.insertOne({ ...document }, opts); }); operations.set('insertMany', async ({ entities, operation }) => { @@ -708,13 +709,17 @@ operations.set('waitForThread', async ({ entities, operation }) => { operations.set('withTransaction', async ({ entities, operation, client, testConfig }) => { const session = entities.getEntity('session', operation.object); - const options = { + const options: TransactionOptions = { readConcern: ReadConcern.fromOptions(operation.arguments), writeConcern: WriteConcern.fromOptions(operation.arguments), readPreference: ReadPreference.fromOptions(operation.arguments), - maxCommitTimeMS: operation.arguments!.maxCommitTimeMS + maxCommitTimeMS: operation.arguments?.maxCommitTimeMS }; + if (typeof operation.arguments?.timeoutMS === 'number') { + options.timeoutMS = operation.arguments.timeoutMS; + } + await session.withTransaction(async () => { for (const callbackOperation of operation.arguments!.callback) { await executeOperationAndCheck(callbackOperation, entities, client, testConfig, true); @@ -935,7 +940,7 @@ export async function executeOperationAndCheck( rethrow = false ): Promise { const opFunc = operations.get(operation.name); - expect(opFunc, `Unknown operation: ${operation.name}`).to.exist; + if (opFunc == null) expect.fail(`Unknown operation: ${operation.name}`); if (operation.arguments && operation.arguments.session) { // The session could need to be either pulled from the entity map or in the case where @@ -949,7 +954,7 @@ export async function executeOperationAndCheck( let result; try { - result = await opFunc!({ entities, operation, client, testConfig }); + result = await opFunc({ entities, operation, client, testConfig }); } catch (error) { if (operation.expectError) { expectErrorCheck(error, operation.expectError, entities); From 4c4b0a9f7a61202ef422e06aa4dc5ab2e941023b Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 11:35:27 -0400 Subject: [PATCH 009/136] feat(NODE-6304): add CSOT support for non-tailable cursors (#4195) --- src/cmap/connection.ts | 4 +- src/cmap/wire_protocol/on_data.ts | 1 - src/collection.ts | 6 +- src/cursor/abstract_cursor.ts | 146 +++- src/cursor/aggregation_cursor.ts | 20 +- src/cursor/change_stream_cursor.ts | 6 +- src/cursor/find_cursor.ts | 2 +- src/cursor/list_collections_cursor.ts | 2 +- src/cursor/list_indexes_cursor.ts | 2 +- src/cursor/run_command_cursor.ts | 14 +- src/index.ts | 2 +- src/operations/aggregate.ts | 4 + src/operations/execute_operation.ts | 3 +- src/operations/find.ts | 4 + src/operations/indexes.ts | 9 +- src/operations/list_collections.ts | 3 + src/operations/operation.ts | 3 + src/operations/run_command.ts | 2 + src/sessions.ts | 12 +- src/timeout.ts | 27 +- ...ient_side_operations_timeout.prose.test.ts | 84 ++- ...lient_side_operations_timeout.spec.test.ts | 83 ++- .../node_csot.test.ts | 335 ++++++++- .../command-execution.json | 153 ++++ .../client-side-operations-timeout/README.md | 661 ++++++++++++++++++ .../change-streams.json | 20 +- .../change-streams.yml | 30 +- .../close-cursors.json | 12 +- .../close-cursors.yml | 12 +- .../command-execution.json | 2 +- .../command-execution.yml | 5 +- .../convenient-transactions.json | 22 +- .../convenient-transactions.yml | 15 +- .../deprecated-options.json | 2 +- .../deprecated-options.yml | 2 +- .../gridfs-advanced.yml | 2 +- .../non-tailable-cursors.json | 20 +- .../non-tailable-cursors.yml | 32 +- .../retryability-timeoutMS.json | 250 +++++++ 
.../retryability-timeoutMS.yml | 100 +++ .../tailable-awaitData.json | 14 +- .../tailable-awaitData.yml | 18 +- .../tailable-non-awaitData.json | 10 +- .../tailable-non-awaitData.yml | 12 +- test/tools/unified-spec-runner/operations.ts | 7 +- test/unit/cursor/aggregation_cursor.test.ts | 67 +- 46 files changed, 2008 insertions(+), 234 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json create mode 100644 test/spec/client-side-operations-timeout/README.md diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 91fe37662b7..83f16ac9bd2 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -86,6 +86,7 @@ export interface CommandOptions extends BSONSerializeOptions { documentsReturnedIn?: string; noResponse?: boolean; omitReadPreference?: boolean; + omitMaxTimeMS?: boolean; // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint // from executeOperation that the txnNum should be applied to this command. @@ -419,7 +420,7 @@ export class Connection extends TypedEventEmitter { ...options }; - if (options.timeoutContext?.csotEnabled()) { + if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { const { maxTimeMS } = options.timeoutContext; if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } @@ -615,7 +616,6 @@ export class Connection extends TypedEventEmitter { for await (const document of this.sendCommand(ns, command, options, responseType)) { if (options.timeoutContext?.csotEnabled()) { if (MongoDBResponse.is(document)) { - // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT if (document.isMaxTimeExpiredError) { throw new MongoOperationTimeoutError('Server reported a timeout error', { cause: new MongoServerError(document.toObject()) diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 23fd88e2828..64c636f41f1 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -93,7 +93,6 @@ export function onData( const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; timeoutForSocketRead?.throwIfExpired(); - // eslint-disable-next-line github/no-then timeoutForSocketRead?.then(undefined, errorHandler); return iterator; diff --git a/src/collection.ts b/src/collection.ts index f3a206b0c7b..a73a5276f5f 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -11,7 +11,7 @@ import { type ListSearchIndexesOptions } from './cursor/list_search_indexes_cursor'; import type { Db } from './db'; -import { MongoInvalidArgumentError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError } from './error'; import type { MongoClient, PkFactory } from './mongo_client'; import type { Filter, @@ -678,7 +678,9 @@ export class Collection { new DropIndexOperation(this as TODO_NODE_3286, '*', resolveOptions(this, options)) ); return true; - } catch { + } catch (error) { + if (error instanceof MongoOperationTimeoutError) throw error; // TODO: Check the spec for index management behaviour/file a drivers ticket for this + // Seems like we should throw all errors return false; } } diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 51206b51a27..d0f386923ad 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,6 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from 
'../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; +import { TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -60,6 +61,17 @@ export interface CursorStreamOptions { /** @public */ export type CursorFlag = (typeof CURSOR_FLAGS)[number]; +/** @public*/ +export const CursorTimeoutMode = Object.freeze({ + ITERATION: 'iteration', + LIFETIME: 'cursorLifetime' +} as const); + +/** @public + * TODO(NODE-5688): Document and release + * */ +export type CursorTimeoutMode = (typeof CursorTimeoutMode)[keyof typeof CursorTimeoutMode]; + /** @public */ export interface AbstractCursorOptions extends BSONSerializeOptions { session?: ClientSession; @@ -105,6 +117,8 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { noCursorTimeout?: boolean; /** @internal TODO(NODE-5688): make this public */ timeoutMS?: number; + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ @@ -117,6 +131,8 @@ export type InternalAbstractCursorOptions = Omit { - await this.cleanup(); + async close(options?: { timeoutMS?: number }): Promise { + await this.cleanup(options?.timeoutMS); } /** @@ -658,6 +727,8 @@ export abstract class AbstractCursor< this.cursorId = null; this.documents?.clear(); + this.timeoutContext?.clear(); + this.timeoutContext = undefined; this.isClosed = false; this.isKilled = false; this.initialized = false; @@ -707,7 +778,7 @@ export abstract class AbstractCursor< } ); - return await executeOperation(this.cursorClient, getMoreOperation); + return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); } /** @@ -718,6 +789,12 @@ export abstract class AbstractCursor< * a significant refactor. 
*/ private async cursorInit(): Promise { + if (this.cursorOptions.timeoutMS != null) { + this.timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }); + } try { const state = await this._initialize(this.cursorSession); const response = state.response; @@ -729,7 +806,7 @@ export abstract class AbstractCursor< } catch (error) { // the cursor is now initialized, even if an error occurred this.initialized = true; - await this.cleanup(error); + await this.cleanup(undefined, error); throw error; } @@ -763,6 +840,7 @@ export abstract class AbstractCursor< // otherwise need to call getMore const batchSize = this.cursorOptions.batchSize || 1000; + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; try { const response = await this.getMore(batchSize); @@ -770,7 +848,7 @@ export abstract class AbstractCursor< this.documents = response; } catch (error) { try { - await this.cleanup(error); + await this.cleanup(undefined, error); } catch (error) { // `cleanupCursor` should never throw, squash and throw the original error squashError(error); @@ -791,7 +869,7 @@ export abstract class AbstractCursor< } /** @internal */ - private async cleanup(error?: Error) { + private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; try { @@ -806,11 +884,23 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; + let timeoutContext: TimeoutContext | undefined; + if (timeoutMS != null) { + this.timeoutContext?.clear(); + timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }); + } else { + this.timeoutContext?.refresh(); + timeoutContext = this.timeoutContext; + } await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session - }) + }), + timeoutContext ); } } catch (error) { diff --git a/src/cursor/aggregation_cursor.ts b/src/cursor/aggregation_cursor.ts index 622bce14aa1..9c305f07e09 100644 --- a/src/cursor/aggregation_cursor.ts +++ b/src/cursor/aggregation_cursor.ts @@ -1,4 +1,5 @@ import type { Document } from '../bson'; +import { MongoAPIError } from '../error'; import type { ExplainVerbosityLike } from '../explain'; import type { MongoClient } from '../mongo_client'; import { AggregateOperation, type AggregateOptions } from '../operations/aggregate'; @@ -9,6 +10,7 @@ import { mergeOptions, type MongoDBNamespace } from '../utils'; import { AbstractCursor, type AbstractCursorOptions, + CursorTimeoutMode, type InitialCursorResponse } from './abstract_cursor'; @@ -38,6 +40,15 @@ export class AggregationCursor extends AbstractCursor { this.pipeline = pipeline; this.aggregateOptions = options; + + const lastStage: Document | undefined = this.pipeline[this.pipeline.length - 1]; + + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (lastStage?.$merge != null || lastStage?.$out != null) + ) + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); } clone(): AggregationCursor { @@ -60,7 +71,7 @@ export class AggregationCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, aggregateOperation); + const response = await executeOperation(this.client, aggregateOperation, this.timeoutContext); return 
{ server: aggregateOperation.server, session, response }; } @@ -95,6 +106,13 @@ export class AggregationCursor extends AbstractCursor { addStage(stage: Document): AggregationCursor; addStage(stage: Document): AggregationCursor { this.throwIfInitialized(); + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (stage.$out != null || stage.$merge != null) + ) { + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); + } this.pipeline.push(stage); return this as unknown as AggregationCursor; } diff --git a/src/cursor/change_stream_cursor.ts b/src/cursor/change_stream_cursor.ts index b42ce3e1302..13f58675552 100644 --- a/src/cursor/change_stream_cursor.ts +++ b/src/cursor/change_stream_cursor.ts @@ -133,7 +133,11 @@ export class ChangeStreamCursor< session }); - const response = await executeOperation(session.client, aggregateOperation); + const response = await executeOperation( + session.client, + aggregateOperation, + this.timeoutContext + ); const server = aggregateOperation.server; this.maxWireVersion = maxWireVersion(server); diff --git a/src/cursor/find_cursor.ts b/src/cursor/find_cursor.ts index ef21cea290f..653b8c2bfc2 100644 --- a/src/cursor/find_cursor.ts +++ b/src/cursor/find_cursor.ts @@ -69,7 +69,7 @@ export class FindCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, findOperation); + const response = await executeOperation(this.client, findOperation, this.timeoutContext); // the response is not a cursor when `explain` is enabled this.numReturned = response.batchSize; diff --git a/src/cursor/list_collections_cursor.ts b/src/cursor/list_collections_cursor.ts index a529709556d..9b69de1b935 100644 --- a/src/cursor/list_collections_cursor.ts +++ b/src/cursor/list_collections_cursor.ts @@ -41,7 +41,7 @@ export class ListCollectionsCursor< session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/list_indexes_cursor.ts b/src/cursor/list_indexes_cursor.ts index 799ddf5bdb5..0f768f3b699 100644 --- a/src/cursor/list_indexes_cursor.ts +++ b/src/cursor/list_indexes_cursor.ts @@ -30,7 +30,7 @@ export class ListIndexesCursor extends AbstractCursor { session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 78b9826b9b1..6b31ce2263a 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -9,12 +9,20 @@ import type { ReadConcernLike } from '../read_concern'; import type { ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; import { ns } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { + AbstractCursor, + type CursorTimeoutMode, + type InitialCursorResponse +} from './abstract_cursor'; /** @public */ export type RunCursorCommandOptions = { readPreference?: ReadPreferenceLike; session?: ClientSession; + /** @internal */ + timeoutMS?: number; + /** @internal */ + timeoutMode?: CursorTimeoutMode; } & BSONSerializeOptions; /** @public */ @@ -105,7 +113,7 @@ 
export class RunCommandCursor extends AbstractCursor { responseType: CursorResponse }); - const response = await executeOperation(this.client, operation); + const response = await executeOperation(this.client, operation, this.timeoutContext); return { server: operation.server, @@ -123,6 +131,6 @@ export class RunCommandCursor extends AbstractCursor { ...this.getMoreOptions }); - return await executeOperation(this.client, getMoreOperation); + return await executeOperation(this.client, getMoreOperation, this.timeoutContext); } } diff --git a/src/index.ts b/src/index.ts index 31ef5720e10..adcb76635b8 100644 --- a/src/index.ts +++ b/src/index.ts @@ -106,7 +106,7 @@ export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypt export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi'; export { AuthMechanism } from './cmap/auth/providers'; export { Compressor } from './cmap/wire_protocol/compression'; -export { CURSOR_FLAGS } from './cursor/abstract_cursor'; +export { CURSOR_FLAGS, type CursorTimeoutMode } from './cursor/abstract_cursor'; export { MongoErrorLabel } from './error'; export { ExplainVerbosity } from './explain'; export { ServerApiVersion } from './mongo_client'; diff --git a/src/operations/aggregate.ts b/src/operations/aggregate.ts index 50494cbba73..096fe372715 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -36,6 +37,9 @@ export interface AggregateOptions extends CommandOperationOptions { let?: Document; out?: string; + + /** @internal */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index f9d9f9b63b4..dd9ba06c514 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -280,8 +280,7 @@ async function tryOperation< previousOperationError = operationError; // Reset timeouts - timeoutContext.serverSelectionTimeout?.clear(); - timeoutContext.connectionCheckoutTimeout?.clear(); + timeoutContext.clear(); } } diff --git a/src/operations/find.ts b/src/operations/find.ts index 5f359324d56..c39695cc0bc 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; @@ -64,6 +65,9 @@ export interface FindOptions * @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored. 
*/ oplogReplay?: boolean; + + /** @internal*/ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index c96a5d73453..220d438d834 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -1,7 +1,7 @@ import type { Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Collection } from '../collection'; -import { type AbstractCursorOptions } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; @@ -360,7 +360,12 @@ export class DropIndexOperation extends CommandOperation { } /** @public */ -export type ListIndexesOptions = AbstractCursorOptions; +export type ListIndexesOptions = AbstractCursorOptions & { + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; + /** @internal */ + omitMaxTimeMS?: boolean; +}; /** @internal */ export class ListIndexesOperation extends CommandOperation { diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index 702db0fe3f2..50df243a3ff 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -1,5 +1,6 @@ import type { Binary, Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -16,6 +17,8 @@ export interface ListCollectionsOptions extends Omit { public readonly start: number; public ended: number | null = null; public duration: number; - public timedOut = false; + private timedOut = false; public cleared = false; get remainingTime(): number { @@ -100,6 +100,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.timedOut = false; this.cleared = true; } @@ -190,6 +191,10 @@ export abstract class TimeoutContext { abstract get timeoutForSocketRead(): Timeout | null; abstract csotEnabled(): this is CSOTTimeoutContext; + + abstract refresh(): void; + + abstract clear(): void; } /** @internal */ @@ -288,6 +293,18 @@ export class CSOTTimeoutContext extends TimeoutContext { if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } + + refresh(): void { + this.start = Math.trunc(performance.now()); + this.minRoundTripTime = 0; + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } + + clear(): void { + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } } /** @internal */ @@ -326,4 +343,12 @@ export class LegacyTimeoutContext extends TimeoutContext { get timeoutForSocketRead(): Timeout | null { return null; } + + refresh(): void { + return; + } + + clear(): void { + return; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 406aa53ed6a..0d36998fd96 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ 
b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -4,7 +4,9 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { type CommandStartedEvent } from '../../../mongodb'; import { + type CommandSucceededEvent, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -216,12 +218,52 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('5. Blocking Iteration Methods', () => { + context('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an * error occurs. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 20 + } + }; + let internalClient: MongoClient; + let client: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient.db('db').dropCollection('coll'); + // Creating capped collection to be able to create tailable find cursor + const coll = await internalClient + .db('db') + .createCollection('coll', { capped: true, size: 1_000_000 }); + await coll.insertOne({ x: 1 }); + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); context('Tailable cursors', () => { /** @@ -248,6 +290,29 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ + + it.skip('send correct number of finds and getMores', async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { tailable: true, awaitData: true }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + // Check that there are no getMores sent + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(0); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + // Expect 1 find + expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); + // Expect 2 getMore + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6305)'; }); context('Change Streams', () => { @@ -272,6 +337,23 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ + it.skip('sends correct number of aggregate and getMores', async function () { + const changeStream = client.db('db').collection('coll').watch(); + const maybeError = await changeStream.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + const aggregates = commandStarted + .filter(e => e.command.aggregate != null) + .map(e => e.command); + const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); + // Expect 1 aggregate + expect(aggregates).to.have.lengthOf(1); + // Expect 1 getMore + expect(getMores).to.have.lengthOf(1); + }).skipReason = 'TODO(NODE-6305)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index a178cecc5d2..99914fa08e7 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -4,49 +4,55 @@ import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -const enabled = [ - 'override-collection-timeoutMS', - 'override-database-timeoutMS', - 'override-operation-timeoutMS', - 'retryability-legacy-timeouts', - 'retryability-timeoutMS', - 'sessions-override-operation-timeoutMS', - 'sessions-override-timeoutMS', - 'sessions-inherit-timeoutMS' -]; +const skippedSpecs = { + bulkWrite: 'TODO(NODE-6274)', + 'change-streams': 'TODO(NODE-6035)', + 'convenient-transactions': 'TODO(NODE-5687)', + 'deprecated-options': 'TODO(NODE-5689)', + 'gridfs-advanced': 'TODO(NODE-6275)', + 'gridfs-delete': 'TODO(NODE-6275)', + 'gridfs-download': 'TODO(NODE-6275)', + 'gridfs-find': 'TODO(NODE-6275)', + 'gridfs-upload': 'TODO(NODE-6275)', + 'tailable-awaitData': 'TODO(NODE-6035)', + 'tailable-non-awaitData': 'TODO(NODE-6035)' +}; -const cursorOperations = [ - 'aggregate', - 'countDocuments', - 'listIndexes', - 'createChangeStream', - 'listCollections', - 'listCollectionNames' -]; - -const bulkWriteOperations = [ - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' -]; +const skippedTests = { + 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': + 'TODO(NODE-6305)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': + 'TODO(NODE-6274)', + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': + 'TODO(NODE-6274)', + 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', + 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': + 'TODO(DRIVERS-2965)', + 'Non-tailable cursor lifetime remaining timeoutMS applied to 
getMore if timeoutMode is unset': + 'TODO(DRIVERS-2965)', + 'maxTimeMS value in the command is less than timeoutMS': + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' +}; describe('CSOT spec tests', function () { - const specs = loadSpecTests(join('client-side-operations-timeout')); + const specs = loadSpecTests('client-side-operations-timeout'); for (const spec of specs) { for (const test of spec.tests) { - // not one of the test suites listed in kickoff - if (!enabled.includes(spec.name)) { - test.skipReason = 'TODO(NODE-5684): Not working yet'; + if (skippedSpecs[spec.name] != null) { + test.skipReason = skippedSpecs[spec.name]; + } + if (skippedTests[test.description] != null) { + test.skipReason = skippedTests[test.description]; } - - // Cursor operation - if (test.operations.find(operation => cursorOperations.includes(operation.name))) - test.skipReason = 'TODO(NODE-5684): Not working yet'; - - if (bulkWriteOperations.includes(test.description)) - test.skipReason = - 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } + runUnifiedSuite(specs, (test, configuration) => { const sessionCSOTTests = ['timeoutMS applied to withTransaction']; if ( @@ -59,3 +65,10 @@ describe('CSOT spec tests', function () { return false; }); }); + +describe('CSOT modified spec tests', function () { + const specs = loadSpecTests( + join('..', 'integration', 'client-side-operations-timeout', 'unified-csot-node-specs') + ); + runUnifiedSuite(specs); +}); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index cc767c1d80a..f5ada7eef9f 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,6 @@ /* Anything javascript specific relating to timeouts */ +import { setTimeout } from 'node:timers/promises'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -7,6 +9,9 @@ import { BSON, type ClientSession, type Collection, + type CommandFailedEvent, + type CommandStartedEvent, + type CommandSucceededEvent, Connection, type Db, type FindCursor, @@ -18,7 +23,9 @@ import { } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { +const metadata = { requires: { mongodb: '>=4.4' } }; + +describe('CSOT driver tests', metadata, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -171,8 +178,8 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('server-side maxTimeMS errors are transformed', () => { let client: MongoClient; - let commandsSucceeded; - let commandsFailed; + let commandsSucceeded: CommandSucceededEvent[]; + let commandsFailed: CommandFailedEvent[]; beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); @@ -221,18 +228,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command failed', async () => { - const error = await client - .db() - .command({ ping: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); 
- expect(error.cause).to.have.property('code', 50); - - expect(commandsFailed).to.have.lengthOf(1); - expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command failed', + metadata, + async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { @@ -267,18 +278,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { afterEach(() => sinon.restore()); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client - .db('admin') - .command({ giveMeWriteErrors: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { @@ -306,22 +321,266 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + } + ); + }); + }); + + describe('Non-Tailable cursors', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await 
internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('ITERATION mode', () => { + context('when executing an operation', () => { + it( + 'must apply the configured timeoutMS to the initial operation execution', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 3, timeoutMode: 'iteration', timeoutMS: 10 }) + .limit(3); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + + it('refreshes the timeout for any getMores', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + + // Iterating over 3 documents in the collection, each artificially taking ~50 ms due to failpoint. If timeoutMS is not refreshed, then we'd expect to error + for await (const doc of cursor) { + expect(doc).to.deep.equal({ x: 1 }); + } + + const finds = commandSucceeded.filter(ev => ev.commandName === 'find'); + const getMores = commandSucceeded.filter(ev => ev.commandName === 'getMore'); + + expect(finds).to.have.length(1); // Expecting 1 find + expect(getMores).to.have.length(3); // Expecting 3 getMores (including final empty getMore) + }); + + it( + 'does not append a maxTimeMS to the original command or getMores', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + await cursor.toArray(); + + expect(commandStarted).to.have.length.gte(3); // Find and 2 getMores + expect( + commandStarted.filter(ev => { + return ( + ev.command.find != null && + ev.command.getMore != null && + ev.command.maxTimeMS != null + ); + }) + ).to.have.lengthOf(0); + } + ); + }); + }); + + context('LIFETIME mode', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient .db() - 
.collection('a') - .insertOne({}) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeConcernError.code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + context('when executing a next call', () => { + context( + 'when there are documents available from previously retrieved batch and timeout has expired', + () => { + it('returns documents without error', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.be.gt(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.not.be.instanceOf(MongoOperationTimeoutError); + expect(docOrErr).to.be.deep.equal({ x: 1 }); + }); + } + ); + context('when a getMore is required and the timeout has expired', () => { + it('throws a MongoOperationTimeoutError', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + + .project({ _id: 0 }); + + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.equal(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + it('does not apply maxTimeMS to a getMore', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 1000 }) + .project({ _id: 0 }); + + for await (const _doc of cursor) { + // Ignore _doc + } + + const getMores = commandStarted + .filter(ev => ev.command.getMore != null) + .map(ev => ev.command); + expect(getMores.length).to.be.gt(0); + + for (const getMore of getMores) { + expect(getMore.maxTimeMS).to.not.exist; + } + }); }); }); }); + describe.skip('Tailable non-awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe.skip('Tailable awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json new file mode 100644 index 00000000000..dd6fcb2cf84 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json @@ -0,0 +1,153 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + 
"initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1500 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 500 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/client-side-operations-timeout/README.md b/test/spec/client-side-operations-timeout/README.md new file mode 100644 index 00000000000..a960c2de219 --- /dev/null +++ b/test/spec/client-side-operations-timeout/README.md @@ -0,0 +1,661 @@ +# Client Side Operations Timeouts Tests + +______________________________________________________________________ + +## Introduction + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +## Spec Tests + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of +them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute these +tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another +with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and +`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters. + +## Prose Tests + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). 
Any fail points set during a +test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests +MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to +listen for `command_started` events. + +### 1. Multi-batch inserts + +This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an +exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow +for differing bulk encoding performance. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call. + + - Expect this to fail with a timeout error. + +5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. + +### 2. maxTimeMS is not set for commands sent to mongocryptd + +This test MUST only be run against enterprise server versions 4.2 and higher. + +1. Launch a mongocryptd process on 23000. +2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. +3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. +4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. + +### 3. ClientEncryption + +Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY` +refers to the following base64: + +```javascript +Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk +``` + +For each test, perform the following setup: + +1. Using `internalClient`, drop and create the `keyvault.datakeys` collection. + +2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`. + +3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). Configure this + object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map: + + ```javascript + { + "local": { "key": } + } + ``` + +#### createDataKey + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to fail with a timeout error. + +3. Verify that an `insert` command was executed against to `keyvault.datakeys` as part of the `createDataKey` call. + +#### encrypt + +1. Call `client_encryption.createDataKey()` with the `local` KMS provider. + + - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. 
Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`. + + - Expect this to fail with a timeout error. + +4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. + +#### decrypt + +1. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`. + +2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`. + + - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`. + +3. Close and re-create the `keyVaultClient` and `clientEncryption` objects. + +4. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +5. Call `clientEncryption.decrypt()` with the value `encrypted`. + + - Expect this to fail with a timeout error. + +6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. + +### 4. Background Connection Pooling + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields +(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for +some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are +not published within that time. + +#### timeoutMS used for handshake commands + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 10 + - `appName` of `timeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published. + +#### timeoutMS is refreshed for each handshake command + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 20 + - `appName` of `refreshTimeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published. + +### 5. Blocking Iteration Methods + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an +error occurs. + +#### Tailable cursors + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`. + +3. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`. + + - Expect this to succeed and return a cursor with a non-zero ID. + +6. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command. + +7. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the + test. + +#### Change Streams + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +4. Using `client`, use the `watch` helper to create a change stream against `db.coll`. + + - Expect this to succeed and return a change stream with a non-zero ID. + +5. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during + the test. + +### 6. GridFS - Upload + +Tests in this section MUST only be run against server versions 4.4 and higher. + +#### uploads via openUploadStream can be timed out + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload a single `0x12` byte. + +7. Call `uploadStream.close()` to flush the stream and insert chunks. + + - Expect this to fail with a timeout error. + +#### Aborting an upload stream can be timed out + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with + `chunkSizeBytes=2`. + +5. 
Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`. + +7. Call `uploadStream.abort()`. + + - Expect this to fail with a timeout error. + +### 7. GridFS - Download + +This test MUST only be run against server versions 4.4 and higher. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, insert the following document into the `db.fs.files` collection: + + ```javascript + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream + (referred to as `downloadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +7. Read from the `downloadStream`. + + - Expect this to fail with a timeout error. + +8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against + `db.fs.chunks`. + +### 8. Server Selection + +#### serverSelectionTimeoutMS honored if timeoutMS is not set + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. +2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0 + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. 
a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +### 9. endSession + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run +three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via +the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the +`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient + (referred to as `session`). + +4. Execute the following code: + + ```typescript + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + ``` + +5. Using `session`, execute `session.end_session` + + - Expect this to fail with a timeout error after no more than 15ms. + +### 10. Convenient Transactions + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +#### timeoutMS is refreshed for abortTransaction if the callback fails + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession + derived from that MongoClient (referred to as `session`). + +4. Using `session`, execute a `withTransaction` operation with the following callback: + + ```typescript + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + ``` + +5. Expect the previous `withTransaction` call to fail with a timeout error. + +6. 
Verify that the following events were published during the `withTransaction` call: + + 1. `command_started` and `command_failed` events for an `insert` command. + 2. `command_started` and `command_failed` events for an `abortTransaction` command. + +### 11. Multi-batch bulkWrites + +This test MUST only run against server versions 8.0+. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["bulkWrite"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + in the response. + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +5. Create a list of write models (referred to as `models`) with the following write model repeated + (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + + ```json + InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + } + ``` + +6. Call `bulkWrite` on `client` with `models`. + + - Expect this to fail with a timeout error. + +7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. + +## Unit Tests + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set. +- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a + server has been selected. +- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server + has been selected. +- If a new connection is required to execute an operation, + `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment. +- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should + apply to HTTP requests against OCSP responders. +- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts. +- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment. 
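+
+As a non-normative illustration of the `min(...)` bullets above, the following sketch shows one way a driver
+might compute the timeout applied to socket establishment when a new connection is required. The helper and
+parameter names (`socketEstablishmentTimeoutMS`, `remainingTimeoutMS`) are hypothetical, and the sketch assumes
+the CSOT convention that a timeout value of `0` means "no timeout":
+
+```typescript
+// Illustrative sketch only; not a required API. Assumes 0 means "no timeout" for both inputs.
+function socketEstablishmentTimeoutMS(
+  remainingTimeoutMS: number, // remaining computedServerSelectionTimeout for the operation
+  connectTimeoutMS: number
+): number {
+  // A value of 0 is treated as unlimited, so it is only used if both values are 0.
+  if (remainingTimeoutMS === 0) return connectTimeoutMS;
+  if (connectTimeoutMS === 0) return remainingTimeoutMS;
+  return Math.min(remainingTimeoutMS, connectTimeoutMS);
+}
+```
+
+The same shape applies to the OCSP bullet above, with `connectTimeoutMS` replaced by a fixed 5-second cap.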
diff --git a/test/spec/client-side-operations-timeout/change-streams.json b/test/spec/client-side-operations-timeout/change-streams.json index aef77bb452d..8cffb08e267 100644 --- a/test/spec/client-side-operations-timeout/change-streams.json +++ b/test/spec/client-side-operations-timeout/change-streams.json @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/spec/client-side-operations-timeout/change-streams.yml b/test/spec/client-side-operations-timeout/change-streams.yml index b2a052d01b2..c813be035ac 100644 --- a/test/spec/client-side-operations-timeout/change-streams.yml +++ b/test/spec/client-side-operations-timeout/change-streams.yml @@ -67,12 +67,12 @@ tests: data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 55 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 50 + timeoutMS: 200 expectError: isTimeoutError: true expectEvents: @@ -142,12 +142,12 @@ tests: data: failCommands: ["aggregate", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 maxAwaitTimeMS: 1 saveResultAsEntity: &changeStream changeStream @@ -171,16 +171,16 @@ tests: maxTimeMS: 1 # The timeout should be applied to the entire resume attempt, not individually to each command. The test creates a - # change stream with timeoutMS=20 which returns an empty initial batch and then sets a fail point to block both - # getMore and aggregate for 12ms each and fail with a resumable error. When the resume attempt happens, the getMore - # and aggregate block for longer than 20ms total, so it times out. + # change stream with timeoutMS=200 which returns an empty initial batch and then sets a fail point to block both + # getMore and aggregate for 120ms each and fail with a resumable error. When the resume attempt happens, the getMore + # and aggregate block for longer than 200ms total, so it times out. 
- description: "timeoutMS applies to full resume attempt in a next call" operations: - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - name: failPoint object: testRunner @@ -192,7 +192,7 @@ tests: data: failCommands: ["getMore", "aggregate"] blockConnection: true - blockTimeMS: 12 + blockTimeMS: 120 errorCode: 7 # HostNotFound - resumable but does not require an SDAM state change. # failCommand doesn't correctly add the ResumableChangeStreamError by default. It needs to be specified # manually here so the error is considered resumable. The failGetMoreAfterCursorCheckout fail point @@ -234,9 +234,9 @@ tests: # Specify a short maxAwaitTimeMS because otherwise the getMore on the new cursor will wait for 1000ms and # time out. maxAwaitTimeMS: 1 - timeoutMS: 100 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - # Block getMore for 150ms to force the next() call to time out. + # Block getMore for 250ms to force the next() call to time out. - name: failPoint object: testRunner arguments: @@ -247,7 +247,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 150 + blockTimeMS: 250 # The original aggregate didn't return any events so this should do a getMore and return a timeout error. - name: iterateUntilDocumentOrError object: *changeStream @@ -290,7 +290,7 @@ tests: collection: *collectionName # The timeoutMS value should be refreshed for getMore's. This is a failure test. The createChangeStream operation - # sets timeoutMS=10 and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # sets timeoutMS=200 and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -303,12 +303,12 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 10 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream # The first iteration should do a getMore - name: iterateUntilDocumentOrError diff --git a/test/spec/client-side-operations-timeout/close-cursors.json b/test/spec/client-side-operations-timeout/close-cursors.json index 1361971c4ce..79b0de7b6aa 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.json +++ b/test/spec/client-side-operations-timeout/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/spec/client-side-operations-timeout/close-cursors.yml b/test/spec/client-side-operations-timeout/close-cursors.yml index db26e79ca31..c4c4ea0acda 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.yml +++ 
b/test/spec/client-side-operations-timeout/close-cursors.yml @@ -46,13 +46,13 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 50 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor # Iterate the cursor three times. The third should do a getMore, which should fail with a timeout error. - name: iterateUntilDocumentOrError @@ -99,18 +99,18 @@ tests: data: failCommands: ["killCursors"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor - name: close object: *cursor arguments: - timeoutMS: 40 + timeoutMS: 400 expectEvents: - client: *client events: @@ -120,7 +120,7 @@ tests: commandName: find - commandStartedEvent: command: - killCursors: *collection + killCursors: *collectionName maxTimeMS: { $$type: ["int", "long"] } commandName: killCursors - commandSucceededEvent: diff --git a/test/spec/client-side-operations-timeout/command-execution.json b/test/spec/client-side-operations-timeout/command-execution.json index b9b306c7fb6..aa9c3eb23f3 100644 --- a/test/spec/client-side-operations-timeout/command-execution.json +++ b/test/spec/client-side-operations-timeout/command-execution.json @@ -3,7 +3,7 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "topologies": [ "single", "replicaset", diff --git a/test/spec/client-side-operations-timeout/command-execution.yml b/test/spec/client-side-operations-timeout/command-execution.yml index 400a90867a3..6ba0585b3ca 100644 --- a/test/spec/client-side-operations-timeout/command-execution.yml +++ b/test/spec/client-side-operations-timeout/command-execution.yml @@ -3,9 +3,8 @@ description: "timeoutMS behaves correctly during command execution" schemaVersion: "1.9" runOnRequirements: - # The appName filter cannot be used to set a fail point on connection handshakes until server version 4.9 due to - # SERVER-49220/SERVER-49336. - - minServerVersion: "4.9" + # Require SERVER-49336 for failCommand + appName on the initial handshake. + - minServerVersion: "4.4.7" # Skip load-balanced and serverless which do not support RTT measurements. 
topologies: [ single, replicaset, sharded ] serverless: forbid diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.json b/test/spec/client-side-operations-timeout/convenient-transactions.json index 07e676d5f51..3868b3026c2 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.json +++ b/test/spec/client-side-operations-timeout/convenient-transactions.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.yml b/test/spec/client-side-operations-timeout/convenient-transactions.yml index d79aa4bd058..02d48b83242 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.yml +++ b/test/spec/client-side-operations-timeout/convenient-transactions.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -49,6 +49,8 @@ tests: timeoutMS: 100 expectError: isClientError: true + expectError: + isClientError: true expectEvents: # The only operation run fails with a client-side error, so there should be no events for the client. - client: *client @@ -66,7 +68,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 300 - name: withTransaction object: *session arguments: @@ -88,9 +90,6 @@ tests: expectEvents: - client: *client events: - # Because the second insert expects an error and gets an error, it technically succeeds, so withTransaction - # will try to run commitTransaction. This will fail client-side, though, because the timeout has already - # expired, so no command is sent. 
- commandStartedEvent: commandName: insert databaseName: *databaseName @@ -103,3 +102,9 @@ tests: command: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } diff --git a/test/spec/client-side-operations-timeout/deprecated-options.json b/test/spec/client-side-operations-timeout/deprecated-options.json index 322e9449101..d3e4631ff43 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.json +++ b/test/spec/client-side-operations-timeout/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/spec/client-side-operations-timeout/deprecated-options.yml b/test/spec/client-side-operations-timeout/deprecated-options.yml index 461ba6ab139..582a8983ae2 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.yml +++ b/test/spec/client-side-operations-timeout/deprecated-options.yml @@ -1,4 +1,4 @@ -description: "operations ignore deprected timeout options if timeoutMS is set" +description: "operations ignore deprecated timeout options if timeoutMS is set" schemaVersion: "1.9" diff --git a/test/spec/client-side-operations-timeout/gridfs-advanced.yml b/test/spec/client-side-operations-timeout/gridfs-advanced.yml index bc788bacc35..f6c37e165b2 100644 --- a/test/spec/client-side-operations-timeout/gridfs-advanced.yml +++ b/test/spec/client-side-operations-timeout/gridfs-advanced.yml @@ -119,7 +119,7 @@ tests: update: *filesCollectionName maxTimeMS: { $$type: ["int", "long"] } - # Tests for the "drop" opration. Any tests that might result in multiple commands being sent do not have expectEvents + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents # assertions as these assertions reduce test robustness and can cause flaky failures. 
- description: "timeoutMS can be overridden for drop" diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.json b/test/spec/client-side-operations-timeout/non-tailable-cursors.json index 0a5448a6bb2..291c6e72aa1 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.json +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml index 8cd953dec45..29037b4c0a3 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -53,7 +53,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -86,14 +86,14 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 - # Run a find with timeoutMS=20 and batchSize=1 to force two batches, which will cause a find and a getMore to be - # sent. Both will block for 15ms so together they will go over the timeout. + blockTimeMS: 125 + # Run a find with timeoutMS=200 and batchSize=1 to force two batches, which will cause a find and a getMore to be + # sent. Both will block for 125ms, so together they will go over the timeout. - name: find object: *collection arguments: filter: {} - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -127,13 +127,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: find object: *collection arguments: filter: {} timeoutMode: cursorLifetime - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -168,7 +168,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -187,8 +187,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. 
Neither - # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=20 and both - # "find" and "getMore" commands are blocked for 15ms each. Neither exceeds the timeout, so iteration succeeds. + # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=200 and both + # "find" and "getMore" commands are blocked for 125ms each. Neither exceeds the timeout, so iteration succeeds. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - success" operations: - name: failPoint @@ -201,13 +201,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 125 - name: find object: *collection arguments: filter: {} timeoutMode: iteration - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectEvents: - client: *client @@ -227,8 +227,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither - # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=10 and "getMore" - # commands are blocked for 15ms, causing iteration to fail with a timeout error. + # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=200 and "getMore" + # commands are blocked for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure" operations: - name: failPoint @@ -241,7 +241,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json index a28dbd26854..9daad260ef3 100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json @@ -108,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -198,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -327,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -419,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -546,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -634,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -760,6 +790,11 @@ }, { "description": "operation is retried multiple 
times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -851,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -982,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1075,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1203,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1291,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1417,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1508,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1639,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1732,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1868,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1964,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2095,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2183,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2303,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2390,6 +2495,11 @@ }, 
{ "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2512,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2600,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2730,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2825,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2955,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3043,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3166,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3254,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3377,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3465,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3588,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3676,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3799,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3887,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": 
"failPoint", @@ -4010,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4098,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4218,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4305,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4428,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4517,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4641,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4729,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4852,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4940,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5060,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5147,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5269,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5357,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml index 039f7ca42ef..6f47d6c2e42 
100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml @@ -84,6 +84,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -125,6 +127,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -191,6 +195,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -233,6 +239,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -299,6 +307,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -340,6 +350,8 @@ tests: delete: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -406,6 +418,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -448,6 +462,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -515,6 +531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -557,6 +575,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -623,6 +643,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner 
@@ -664,6 +686,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -730,6 +754,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -772,6 +798,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -839,6 +867,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -881,6 +911,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -949,6 +981,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -992,6 +1026,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1059,6 +1095,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1100,6 +1138,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1163,6 +1203,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1203,6 +1245,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1267,6 +1311,8 @@ tests: expectError: isTimeoutError: true - 
description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1308,6 +1354,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1373,6 +1421,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1414,6 +1464,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1479,6 +1531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1520,6 +1574,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1585,6 +1641,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1626,6 +1684,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1691,6 +1751,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1732,6 +1794,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1797,6 +1861,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1838,6 +1904,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1903,6 +1971,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1944,6 +2014,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2009,6 +2081,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2050,6 +2124,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2113,6 +2189,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2153,6 +2231,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2218,6 +2298,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2260,6 +2342,8 @@ tests: distinct: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2326,6 +2410,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2367,6 +2453,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2432,6 +2520,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: 
testRunner @@ -2473,6 +2563,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2536,6 +2628,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2576,6 +2670,8 @@ tests: listIndexes: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2640,6 +2736,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2681,6 +2779,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.json b/test/spec/client-side-operations-timeout/tailable-awaitData.json index 6da85c77835..535fb692434 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -130,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +188,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +199,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +272,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +283,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +354,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-awaitData.yml index 422c6fb5370..52b9b3b456c 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -83,7 +83,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 300 - name: find object: *collection arguments: @@ -117,13 +117,13 @@ tests: data: failCommands: ["find", "getMore"] 
blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate twice to force a getMore. The first iteration will return the document from the first batch and the @@ -165,13 +165,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 maxAwaitTimeMS: 1 saveResultAsEntity: &tailableCursor tailableCursor @@ -199,8 +199,8 @@ tests: collection: *collectionName maxTimeMS: 1 - # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from - # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=200 from + # the collection and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -213,7 +213,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json index 34ee6609636..e88230e4f7a 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml index 766b46e658b..eb75deaa65c 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -59,7 +59,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -96,13 +96,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailable - timeoutMS: 20 + timeoutMS: 200 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the @@ -131,7 +131,7 @@ tests: 
maxTimeMS: { $$exists: false } # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. The - # find inherits timeoutMS=10 from the collection and the getMore command blocks for 15ms, causing iteration to fail + # find inherits timeoutMS=200 from the collection and the getMore command blocks for 250ms, causing iteration to fail # with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: @@ -145,7 +145,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index d43f541aae1..fd8f6f66fcc 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -205,7 +205,8 @@ operations.set('close', async ({ entities, operation }) => { /* eslint-disable no-empty */ try { const cursor = entities.getEntity('cursor', operation.object); - await cursor.close(); + const timeoutMS = operation.arguments?.timeoutMS; + await cursor.close({ timeoutMS }); return; } catch {} @@ -777,7 +778,9 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc const { command, ...opts } = operation.arguments!; const cursor = db.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); diff --git a/test/unit/cursor/aggregation_cursor.test.ts b/test/unit/cursor/aggregation_cursor.test.ts index 32ca4125ff4..82ae18745b0 100644 --- a/test/unit/cursor/aggregation_cursor.test.ts +++ b/test/unit/cursor/aggregation_cursor.test.ts @@ -1,6 +1,12 @@ import { expect } from 'chai'; -import { type AggregationCursor, MongoClient } from '../../mongodb'; +import { + AggregationCursor, + CursorTimeoutMode, + MongoAPIError, + MongoClient, + ns +} from '../../mongodb'; describe('class AggregationCursor', () => { let client: MongoClient; @@ -126,6 +132,38 @@ describe('class AggregationCursor', () => { }); context('when addStage, bespoke stage methods, or array is used to construct pipeline', () => { + context('when CSOT is enabled', () => { + let aggregationCursor: AggregationCursor; + before(function () { + aggregationCursor = client + .db('test') + .collection('test') + .aggregate([], { timeoutMS: 100, timeoutMode: CursorTimeoutMode.ITERATION }); + }); + + context('when a $out stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $out: 'test' }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $merge stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $merge: {} }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $out stage is added with .out()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.out('test'); + }).to.throw(MongoAPIError); + }); + }); + }); + it('sets deeply identical aggregations pipelines', () => { const collection = client.db().collection('test'); @@ -157,4 +195,31 @@ describe('class AggregationCursor', () => { expect(builderGenericStageCursor.pipeline).to.deep.equal(expectedPipeline); }); }); + + 
describe('constructor()', () => {
+    context('when CSOT is enabled', () => {
+      context('when timeoutMode=ITERATION and a $out stage is provided', function () {
+        it('throws a MongoAPIError', function () {
+          expect(
+            () =>
+              new AggregationCursor(client, ns('db.coll'), [{ $out: 'test' }], {
+                timeoutMS: 100,
+                timeoutMode: 'iteration'
+              })
+          ).to.throw(MongoAPIError);
+        });
+      });
+      context('when timeoutMode=ITERATION and a $merge stage is provided', function () {
+        it('throws a MongoAPIError', function () {
+          expect(
+            () =>
+              new AggregationCursor(client, ns('db.coll'), [{ $merge: 'test' }], {
+                timeoutMS: 100,
+                timeoutMode: 'iteration'
+              })
+          ).to.throw(MongoAPIError);
+        });
+      });
+    });
+  });
 });

From 558d416e3430db048a4561777a2d77f641252994 Mon Sep 17 00:00:00 2001
From: Neal Beeken
Date: Thu, 12 Sep 2024 15:24:39 -0400
Subject: [PATCH 010/136] fix(NODE-6374): MongoOperationTimeoutError inherits MongoRuntimeError (#4237)

---
 etc/notes/errors.md     |  6 +++++-
 src/error.ts            | 21 ++++++++++++++++++---
 test/unit/error.test.ts | 20 ++++++++++++++++++++
 3 files changed, 43 insertions(+), 4 deletions(-)

diff --git a/etc/notes/errors.md b/etc/notes/errors.md
index d0f8e6b6e95..114bc1b2e2c 100644
--- a/etc/notes/errors.md
+++ b/etc/notes/errors.md
@@ -67,7 +67,7 @@ Children of `MongoError` include:
 ### `MongoDriverError`
 
 This class represents errors which originate in the driver itself or when the user incorrectly uses the driver. This class should **never** be directly instantiated.
-Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError) and [**`MongoRuntimeError`**](#MongoRuntimeError).
+Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError), [**`MongoRuntimeError`**](#MongoRuntimeError) and [**`MongoOperationTimeoutError`**](#MongoOperationTimeoutError).
 
 ### `MongoAPIError`
 
@@ -109,6 +109,10 @@ This class should **never** be directly instantiated.
 | **MongoGridFSChunkError**              | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. |
 | **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. |
 
+### `MongoOperationTimeoutError`
+
+- TODO(NODE-5688): Add MongoOperationTimeoutError documentation
+
 ### MongoUnexpectedServerResponseError
 
 Intended for the scenario where the MongoDB returns an unexpected response in relation to some state the driver is in.
diff --git a/src/error.ts b/src/error.ts
index 3867553370b..729a4a51a72 100644
--- a/src/error.ts
+++ b/src/error.ts
@@ -310,7 +310,7 @@ export class MongoAPIError extends MongoDriverError {
 
 /**
  * An error generated when the driver encounters unexpected input
- * or reaches an unexpected/invalid internal state
+ * or reaches an unexpected/invalid internal state.
  *
  * @privateRemarks
  * Should **never** be directly instantiated.
@@ -765,9 +765,24 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError {
 }
 
 /**
- * @internal
+ * @public
+ * @category Error
+ *
+ * This error is thrown when an operation could not be completed within the specified `timeoutMS`.
+ * TODO(NODE-5688): expand this documentation.
+ *
+ * @example
+ * ```ts
+ * try {
+ *   await blogs.insertOne(blogPost, { timeoutMS: 60_000 })
+ * } catch (error) {
+ *   if (error instanceof MongoOperationTimeoutError) {
+ *     console.log(`Oh no! writer's block!`, error);
+ *   }
+ * }
+ * ```
  */
-export class MongoOperationTimeoutError extends MongoRuntimeError {
+export class MongoOperationTimeoutError extends MongoDriverError {
   override get name(): string {
     return 'MongoOperationTimeoutError';
   }
diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts
index bdc049cbc4f..dca792bd382 100644
--- a/test/unit/error.test.ts
+++ b/test/unit/error.test.ts
@@ -14,12 +14,15 @@ import {
   LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE,
   LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE,
   MONGODB_ERROR_CODES,
+  MongoDriverError,
   MongoError,
   MongoErrorLabel,
   MongoMissingDependencyError,
   MongoNetworkError,
   MongoNetworkTimeoutError,
+  MongoOperationTimeoutError,
   MongoParseError,
+  MongoRuntimeError,
   MongoServerError,
   MongoSystemError,
   MongoWriteConcernError,
@@ -173,6 +176,23 @@ describe('MongoErrors', () => {
     });
   });
 
+  describe('class MongoOperationTimeoutError', () => {
+    it('has a name property equal to MongoOperationTimeoutError', () => {
+      const error = new MongoOperationTimeoutError('time out!');
+      expect(error).to.have.property('name', 'MongoOperationTimeoutError');
+    });
+
+    it('is instanceof MongoDriverError', () => {
+      const error = new MongoOperationTimeoutError('time out!');
+      expect(error).to.be.instanceOf(MongoDriverError);
+    });
+
+    it('is not instanceof MongoRuntimeError', () => {
+      const error = new MongoOperationTimeoutError('time out!');
+      expect(error).to.not.be.instanceOf(MongoRuntimeError);
+    });
+  });
+
   describe('MongoMissingDependencyError#constructor', () => {
     context('when options.cause is set', () => {
       it('attaches the cause property to the instance', () => {

From 3ed4a149a4a878c3d68ca7226ddfd5fc5b78de8c Mon Sep 17 00:00:00 2001
From: Warren James
Date: Thu, 12 Sep 2024 16:02:50 -0400
Subject: [PATCH 011/136] test: remove empty skipped context blocks (#4238)

---
 .../client-side-operations-timeout/node_csot.test.ts | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts
index f5ada7eef9f..56127cc8ace 100644
--- a/test/integration/client-side-operations-timeout/node_csot.test.ts
+++ b/test/integration/client-side-operations-timeout/node_csot.test.ts
@@ -576,11 +576,6 @@ describe('CSOT driver tests', metadata, () => {
       });
     });
-    describe.skip('Tailable non-awaitData cursors').skipReason =
-      'TODO(NODE-6305): implement CSOT for Tailable cursors';
-    describe.skip('Tailable awaitData cursors').skipReason =
-      'TODO(NODE-6305): implement CSOT for Tailable cursors';
-
     describe('when using an explicit session', () => {
       const metadata: MongoDBMetadataUI = {
         requires: { topology: ['replicaset'], mongodb: '>=4.4' }
      }

From d3438ea60d9b80fe92048f6728920495f91a1f16 Mon Sep 17 00:00:00 2001
From: Neal Beeken
Date: Tue, 17 Sep 2024 13:27:43 -0400
Subject: [PATCH 012/136] feat(NODE-5844): add iscryptd to ServerDescription (#4239)

---
 src/sdam/server_description.ts                 |  4 ++
 .../server_description.test.ts                 | 56 +++++++++++++++++++
 2 files changed, 60 insertions(+)
 create mode 100644 test/integration/server-discovery-and-monitoring/server_description.test.ts

diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts
index cd32f4968b6..d5b67c18080 100644
--- a/src/sdam/server_description.ts
+++ b/src/sdam/server_description.ts
@@ -69,6 +69,8 @@ export class ServerDescription {
   setVersion: number | null;
   electionId: ObjectId | null;
   logicalSessionTimeoutMinutes: number | null;
+  /** Indicates server is a mongocryptd instance. */
+  iscryptd: boolean;
 
   // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level
   $clusterTime?: ClusterTime;
@@ -114,6 +116,7 @@ export class ServerDescription {
     this.primary = hello?.primary ?? null;
     this.me = hello?.me?.toLowerCase() ?? null;
     this.$clusterTime = hello?.$clusterTime ?? null;
+    this.iscryptd = Boolean(hello?.iscryptd);
   }
 
   get hostAddress(): HostAddress {
@@ -167,6 +170,7 @@ export class ServerDescription {
 
     return (
       other != null &&
+      other.iscryptd === this.iscryptd &&
       errorStrictEqual(this.error, other.error) &&
       this.type === other.type &&
       this.minWireVersion === other.minWireVersion &&
diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts
new file mode 100644
index 00000000000..0a3c7eecbf6
--- /dev/null
+++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts
@@ -0,0 +1,56 @@
+import { type ChildProcess, spawn } from 'node:child_process';
+
+import { expect } from 'chai';
+
+import { MongoClient } from '../../mongodb';
+
+describe('class ServerDescription', function () {
+  describe('when connecting to mongocryptd', { requires: { mongodb: '>=4.4' } }, function () {
+    let client: MongoClient;
+    const mongocryptdTestPort = '27022';
+    let childProcess: ChildProcess;
+
+    beforeEach(async function () {
+      childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], {
+        stdio: 'ignore',
+        detached: true
+      });
+
+      childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error));
+      client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}`);
+    });
+
+    afterEach(async function () {
+      await client?.close();
+      childProcess.kill('SIGKILL');
+    });
+
+    it('iscryptd is set to true ', async function () {
+      const descriptions = [];
+      client.on('serverDescriptionChanged', description => descriptions.push(description));
+      const hello = await client.db().command({ hello: true });
+      expect(hello).to.have.property('iscryptd', true);
+      expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', true);
+    });
+  });
+
+  describe('when connecting to anything other than mongocryptd', function () {
+    let client: MongoClient;
+
+    beforeEach(async function () {
+      client = this.configuration.newClient();
+    });
+
+    afterEach(async function () {
+      await client?.close();
+    });
+
+    it('iscryptd is set to false ', async function () {
+      const descriptions = [];
+      client.on('serverDescriptionChanged', description => descriptions.push(description));
+      const hello = await client.db().command({ hello: true });
+      expect(hello).to.not.have.property('iscryptd');
+      expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', false);
+    });
+  });
+});

From ff561e3b3beee17cf14ccf1ec09351e9a5da7b84 Mon Sep 17 00:00:00 2001
From: Aditi Khare
Date: Thu, 19 Sep 2024 17:37:24 -0400
Subject: [PATCH 013/136] temp

---
 src/client-side-encryption/state_machine.ts | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts
index af3ea4c215d..5318b250c5a 100644
--- a/src/client-side-encryption/state_machine.ts
+++ b/src/client-side-encryption/state_machine.ts
@@ -18,6 +18,7 @@ import { autoSelectSocketOptions, type DataKey } from './client_encryption';
 import { MongoCryptError } from './errors';
 import { type MongocryptdManager } from
'./mongocryptd_manager'; import { type KMSProviders } from './providers'; +import { TimeoutContext } from '../timeout'; let socks: SocksLib | null = null; function loadSocks(): SocksLib { @@ -182,7 +183,7 @@ export class StateMachine { /** * Executes the state machine according to the specification */ - async execute(executor: StateMachineExecutable, context: MongoCryptContext): Promise { + async execute(executor: StateMachineExecutable, context: MongoCryptContext, timeoutContext?: TimeoutContext): Promise { const keyVaultNamespace = executor._keyVaultNamespace; const keyVaultClient = executor._keyVaultClient; const metaDataClient = executor._metaDataClient; @@ -201,6 +202,8 @@ export class StateMachine { 'unreachable state machine state: entered MONGOCRYPT_CTX_NEED_MONGO_COLLINFO but metadata client is undefined' ); } + + // TODO: timeout here const collInfo = await this.fetchCollectionInfo(metaDataClient, context.ns, filter); if (collInfo) { @@ -233,6 +236,7 @@ export class StateMachine { case MONGOCRYPT_CTX_NEED_MONGO_KEYS: { const filter = context.nextMongoOperation(); + // TODO: timeout here const keys = await this.fetchKeys(keyVaultClient, keyVaultNamespace, filter); if (keys.length === 0) { From 164780c5fab6d6179aee92af3bef99eda6ce6353 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Fri, 20 Sep 2024 16:20:23 -0400 Subject: [PATCH 014/136] temp --- src/client-side-encryption/state_machine.ts | 55 +++++++++++++++------ src/operations/operation.ts | 2 +- 2 files changed, 42 insertions(+), 15 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 5318b250c5a..fbe37ad36d5 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -18,7 +18,9 @@ import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; import { type MongocryptdManager } from './mongocryptd_manager'; import { type KMSProviders } from './providers'; -import { TimeoutContext } from '../timeout'; +import { CSOTTimeoutContext, TimeoutContext } from '../timeout'; +import { timeLog } from 'console'; +import { RunCommandOptions } from '../operations/run_command'; let socks: SocksLib | null = null; function loadSocks(): SocksLib { @@ -203,9 +205,12 @@ export class StateMachine { ); } - // TODO: timeout here - const collInfo = await this.fetchCollectionInfo(metaDataClient, context.ns, filter); - + const collInfo = await this.fetchCollectionInfo( + metaDataClient, + context.ns, + filter, + timeoutContext instanceof CSOTTimeoutContext ? timeoutContext?.remainingTimeMS : null + ) if (collInfo) { context.addMongoOperationResponse(collInfo); } @@ -225,9 +230,20 @@ export class StateMachine { // When we are using the shared library, we don't have a mongocryptd manager. const markedCommand: Uint8Array = mongocryptdManager ? await mongocryptdManager.withRespawn( - this.markCommand.bind(this, mongocryptdClient, context.ns, command) + this.markCommand.bind( + this, + mongocryptdClient, + context.ns, + command, + timeoutContext instanceof CSOTTimeoutContext ? timeoutContext?.remainingTimeMS : null + ) ) - : await this.markCommand(mongocryptdClient, context.ns, command); + : await this.markCommand( + mongocryptdClient, + context.ns, + command, + timeoutContext instanceof CSOTTimeoutContext ? 
timeoutContext?.remainingTimeMS : null + ); context.addMongoOperationResponse(markedCommand); context.finishMongoOperation(); @@ -236,8 +252,12 @@ export class StateMachine { case MONGOCRYPT_CTX_NEED_MONGO_KEYS: { const filter = context.nextMongoOperation(); - // TODO: timeout here - const keys = await this.fetchKeys(keyVaultClient, keyVaultNamespace, filter); + const keys = await this.fetchKeys( + keyVaultClient, + keyVaultNamespace, + filter, + timeoutContext instanceof CSOTTimeoutContext ? timeoutContext?.remainingTimeMS : null + ); if (keys.length === 0) { // See docs on EMPTY_V @@ -502,7 +522,8 @@ export class StateMachine { async fetchCollectionInfo( client: MongoClient, ns: string, - filter: Document + filter: Document, + timeoutMS?: number | null ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); @@ -510,7 +531,8 @@ export class StateMachine { .db(db) .listCollections(filter, { promoteLongs: false, - promoteValues: false + promoteValues: false, + timeoutMS }) .toArray(); @@ -526,8 +548,12 @@ export class StateMachine { * @param command - The command to execute. * @param callback - Invoked with the serialized and marked bson command, or with an error */ - async markCommand(client: MongoClient, ns: string, command: Uint8Array): Promise { - const options = { promoteLongs: false, promoteValues: false }; + async markCommand(client: MongoClient, ns: string, command: Uint8Array, timeoutMS?: number | null): Promise { + const options: RunCommandOptions = { promoteLongs: false, promoteValues: false }; + if (timeoutMS != null) { + options.timeoutMS = timeoutMS; + options.omitMaxTimeMS = true; + } const { db } = MongoDBCollectionNamespace.fromString(ns); const rawCommand = deserialize(command, options); @@ -547,7 +573,8 @@ export class StateMachine { fetchKeys( client: MongoClient, keyVaultNamespace: string, - filter: Uint8Array + filter: Uint8Array, + timeoutMS?: number | null ): Promise> { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); @@ -555,7 +582,7 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter)) + .find(deserialize(filter), { timeoutMS }) .toArray(); } } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 90a17f1a6ef..5ef459607d8 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -35,7 +35,7 @@ export interface OperationOptions extends BSONSerializeOptions { omitMaxTimeMS?: boolean; /** @internal TODO(NODE-5688): make this public */ - timeoutMS?: number; + timeoutMS?: number | null; } /** @internal */ From 12a7e2eb0edd40f623b57e6cadeae2b7bdd307cd Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Tue, 24 Sep 2024 17:17:33 -0400 Subject: [PATCH 015/136] chore: plumb timeoutMS around more --- src/client-side-encryption/state_machine.ts | 83 +++++++++++-------- src/operations/operation.ts | 4 +- ...ient_side_operations_timeout.prose.test.ts | 53 +++++++++--- .../server_description.test.ts | 4 +- 4 files changed, 95 insertions(+), 49 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index fbe37ad36d5..23e83edf83a 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -12,15 +12,14 @@ import { } from '../bson'; import { type ProxyOptions } from '../cmap/connection'; import { getSocks, type SocksLib } from '../deps'; +import { 
MongoOperationTimeoutError } from '../error'; import { type MongoClient, type MongoClientOptions } from '../mongo_client'; +import { type CSOTTimeoutContext, Timeout, type TimeoutContext } from '../timeout'; import { BufferPool, MongoDBCollectionNamespace, promiseWithResolvers } from '../utils'; import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; import { type MongocryptdManager } from './mongocryptd_manager'; import { type KMSProviders } from './providers'; -import { CSOTTimeoutContext, TimeoutContext } from '../timeout'; -import { timeLog } from 'console'; -import { RunCommandOptions } from '../operations/run_command'; let socks: SocksLib | null = null; function loadSocks(): SocksLib { @@ -185,7 +184,11 @@ export class StateMachine { /** * Executes the state machine according to the specification */ - async execute(executor: StateMachineExecutable, context: MongoCryptContext, timeoutContext?: TimeoutContext): Promise { + async execute( + executor: StateMachineExecutable, + context: MongoCryptContext, + timeoutContext?: CSOTTimeoutContext + ): Promise { const keyVaultNamespace = executor._keyVaultNamespace; const keyVaultClient = executor._keyVaultClient; const metaDataClient = executor._metaDataClient; @@ -209,8 +212,8 @@ export class StateMachine { metaDataClient, context.ns, filter, - timeoutContext instanceof CSOTTimeoutContext ? timeoutContext?.remainingTimeMS : null - ) + timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null + ); if (collInfo) { context.addMongoOperationResponse(collInfo); } @@ -235,15 +238,15 @@ export class StateMachine { mongocryptdClient, context.ns, command, - timeoutContext instanceof CSOTTimeoutContext ? timeoutContext?.remainingTimeMS : null + timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null ) ) : await this.markCommand( - mongocryptdClient, - context.ns, - command, - timeoutContext instanceof CSOTTimeoutContext ? timeoutContext?.remainingTimeMS : null - ); + mongocryptdClient, + context.ns, + command, + timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null + ); context.addMongoOperationResponse(markedCommand); context.finishMongoOperation(); @@ -256,7 +259,7 @@ export class StateMachine { keyVaultClient, keyVaultNamespace, filter, - timeoutContext instanceof CSOTTimeoutContext ? timeoutContext?.remainingTimeMS : null + timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null ); if (keys.length === 0) { @@ -279,9 +282,12 @@ export class StateMachine { } case MONGOCRYPT_CTX_NEED_KMS: { - const requests = Array.from(this.requests(context)); - await Promise.all(requests); - + await Promise.all( + this.requests( + context, + timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null + ) + ); context.finishKMSRequests(); break; } @@ -323,7 +329,7 @@ export class StateMachine { * @param kmsContext - A C++ KMS context returned from the bindings * @returns A promise that resolves when the KMS reply has be fully parsed */ - async kmsRequest(request: MongoCryptKMSRequest): Promise { + async kmsRequest(request: MongoCryptKMSRequest, timeoutMS?: number | null): Promise { const parsedUrl = request.endpoint.split(':'); const port = parsedUrl[1] != null ? 
Number.parseInt(parsedUrl[1], 10) : HTTPS_PORT; const socketOptions = autoSelectSocketOptions(this.options.socketOptions || {}); @@ -353,10 +359,6 @@ export class StateMachine { } } - function ontimeout() { - return new MongoCryptError('KMS request timed out'); - } - function onerror(cause: Error) { return new MongoCryptError('KMS request failed', { cause }); } @@ -388,7 +390,6 @@ export class StateMachine { resolve: resolveOnNetSocketConnect } = promiseWithResolvers(); netSocket - .once('timeout', () => rejectOnNetSocketError(ontimeout())) .once('error', err => rejectOnNetSocketError(onerror(err))) .once('close', () => rejectOnNetSocketError(onclose())) .once('connect', () => resolveOnNetSocketConnect()); @@ -434,8 +435,8 @@ export class StateMachine { reject: rejectOnTlsSocketError, resolve } = promiseWithResolvers(); + socket - .once('timeout', () => rejectOnTlsSocketError(ontimeout())) .once('error', err => rejectOnTlsSocketError(onerror(err))) .once('close', () => rejectOnTlsSocketError(onclose())) .on('data', data => { @@ -449,20 +450,26 @@ export class StateMachine { resolve(); } }); - await willResolveKmsRequest; + + await (typeof timeoutMS === 'number' + ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutMS)]) + : willResolveKmsRequest); + } catch (error) { + if (Timeout.is(error)) throw new MongoOperationTimeoutError('KMS request timed out'); + throw error; } finally { // There's no need for any more activity on this socket at this point. destroySockets(); } } - *requests(context: MongoCryptContext) { + *requests(context: MongoCryptContext, timeoutMS?: number | null) { for ( let request = context.nextKMSRequest(); request != null; request = context.nextKMSRequest() ) { - yield this.kmsRequest(request); + yield this.kmsRequest(request, timeoutMS); } } @@ -532,7 +539,9 @@ export class StateMachine { .listCollections(filter, { promoteLongs: false, promoteValues: false, - timeoutMS + ...(typeof timeoutMS === 'number' + ? { timeoutMS, timeoutMode: 'cursorLifetime' } + : undefined) }) .toArray(); @@ -548,16 +557,20 @@ export class StateMachine { * @param command - The command to execute. * @param callback - Invoked with the serialized and marked bson command, or with an error */ - async markCommand(client: MongoClient, ns: string, command: Uint8Array, timeoutMS?: number | null): Promise { - const options: RunCommandOptions = { promoteLongs: false, promoteValues: false }; - if (timeoutMS != null) { - options.timeoutMS = timeoutMS; - options.omitMaxTimeMS = true; - } + async markCommand( + client: MongoClient, + ns: string, + command: Uint8Array, + timeoutMS?: number | null + ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - const rawCommand = deserialize(command, options); + const bsonOptions = { promoteLongs: false, promoteValues: false }; + const rawCommand = deserialize(command, bsonOptions); - const response = await client.db(db).command(rawCommand, options); + const response = await client.db(db).command(rawCommand, { + ...bsonOptions, + ...(typeof timeoutMS === 'number' ? 
{ timeoutMS, omitMaxTimeMS: true } : undefined) + }); return serialize(response, this.bsonOptions); } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 5ef459607d8..c9792376499 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -34,8 +34,8 @@ export interface OperationOptions extends BSONSerializeOptions { /** @internal Hint to `executeOperation` to omit maxTimeMS */ omitMaxTimeMS?: boolean; - /** @internal TODO(NODE-5688): make this public */ - timeoutMS?: number | null; + /** @public The time limit an operation will be permitted to run for */ + timeoutMS?: number; } /** @internal */ diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 0d36998fd96..b02af226846 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,5 +1,7 @@ /* Specification prose tests */ +import { type ChildProcess, spawn } from 'node:child_process'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -57,16 +59,47 @@ describe('CSOT spec prose tests', function () { */ }); - context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { - /** - * This test MUST only be run against enterprise server versions 4.2 and higher. - * - * 1. Launch a mongocryptd process on 23000. - * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. - * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. - * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. - */ - }); + context.skip( + '2. maxTimeMS is not set for commands sent to mongocryptd', + { requires: { mongodb: '>=4.2' } }, + () => { + /** + * This test MUST only be run against enterprise server versions 4.2 and higher. + * + * 1. Launch a mongocryptd process on 23000. + * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. + * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. + * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. + */ + + let client: MongoClient; + const mongocryptdTestPort = '23000'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:23000/?timeoutMS=1000`); + }); + + afterEach(async function () { + await client?.close(); + childProcess.kill('SIGKILL'); + }); + + it('maxTimeMS is not set', async function () { + const commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client.db('admin').command({ ping: 1 }); + expect(commandStarted).to.have.lengthOf(1); + expect(commandStarted[0].command).to.not.have.property('maxTimeMS'); + }); + } + ); context.skip('3. 
ClientEncryption', () => { /** diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts index 0a3c7eecbf6..90743ba18d4 100644 --- a/test/integration/server-discovery-and-monitoring/server_description.test.ts +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -25,7 +25,7 @@ describe('class ServerDescription', function () { childProcess.kill('SIGKILL'); }); - it('iscryptd is set to true ', async function () { + it('iscryptd is set to true', async function () { const descriptions = []; client.on('serverDescriptionChanged', description => descriptions.push(description)); const hello = await client.db().command({ hello: true }); @@ -45,7 +45,7 @@ describe('class ServerDescription', function () { await client?.close(); }); - it('iscryptd is set to false ', async function () { + it('iscryptd is set to false', async function () { const descriptions = []; client.on('serverDescriptionChanged', description => descriptions.push(description)); const hello = await client.db().command({ hello: true }); From 999f23d26a7813c3e111b4bce505030cf277e4a7 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 11 Apr 2024 17:15:41 -0400 Subject: [PATCH 016/136] feat(NODE-6090): Implement CSOT logic for connection checkout and server selection --- src/admin.ts | 3 +- src/cmap/connection.ts | 4 + src/cmap/connection_pool.ts | 53 ++- src/collection.ts | 5 + src/db.ts | 6 + src/error.ts | 9 + src/index.ts | 1 + src/operations/command.ts | 2 + src/operations/find.ts | 3 +- src/operations/operation.ts | 8 + src/operations/run_command.ts | 9 +- src/sdam/server.ts | 3 +- src/sdam/topology.ts | 54 ++- src/timeout.ts | 14 + src/utils.ts | 10 + ...ient_side_operations_timeout.prose.test.ts | 315 +++++++++++++----- ...lient_side_operations_timeout.unit.test.ts | 140 +++++--- .../node_csot.test.ts | 75 ++++- test/unit/cmap/connection_pool.test.js | 33 +- test/unit/index.test.ts | 1 + 20 files changed, 570 insertions(+), 178 deletions(-) diff --git a/src/admin.ts b/src/admin.ts index a71ac4be1dc..e030384eafc 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -78,7 +78,8 @@ export class Admin { new RunAdminCommandOperation(command, { ...resolveBSONOptions(options), session: options?.session, - readPreference: options?.readPreference + readPreference: options?.readPreference, + timeoutMS: options?.timeoutMS ?? 
this.s.db.timeoutMS }) ); } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 986cce46b6e..445967faa5b 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,6 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; +import { type Timeout } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -94,6 +95,9 @@ export interface CommandOptions extends BSONSerializeOptions { writeConcern?: WriteConcern; directConnection?: boolean; + + /** @internal */ + timeout?: Timeout; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5a858a5121e..79440db1e06 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -21,13 +21,14 @@ import { MongoInvalidArgumentError, MongoMissingCredentialsError, MongoNetworkError, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; +import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -102,7 +103,6 @@ export interface ConnectionPoolOptions extends Omit void; reject: (err: AnyError) => void; - timeout: Timeout; [kCancelled]?: boolean; checkoutTime: number; } @@ -355,37 +355,57 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(): Promise { - const checkoutTime = now(); + async checkOut(options?: { timeout?: Timeout }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; + const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; const { promise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(waitQueueTimeoutMS); + let timeout: Timeout | null = null; + if (options?.timeout) { + // CSOT enabled + // Determine if we're using the timeout passed in or a new timeout + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + // This check determines whether or not Topology.selectServer used the configured + // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + // server selection used `timeoutMS`, so we should use the existing timeout as the timeout + // here + timeout = options.timeout; + } else { + // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with + // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut + // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking + timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); + } + } + } else { + timeout = Timeout.expires(waitQueueTimeoutMS); + } const waitQueueMember: WaitQueueMember = { resolve, - reject, - timeout, - checkoutTime + reject }; this[kWaitQueue].push(waitQueueMember); process.nextTick(() => this.processWaitQueue()); try { - return await Promise.race([promise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { waitQueueMember[kCancelled] = true; - waitQueueMember.timeout.clear(); - this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime) @@ -396,9 +416,16 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); + if (options?.timeout) { + throw new MongoOperationTimeoutError('Timed out during connection checkout', { + cause: timeoutError + }); + } throw timeoutError; } throw error; + } finally { + if (timeout !== options?.timeout) timeout?.clear(); } } @@ -764,7 +791,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.reject(error); continue; @@ -785,7 +811,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECKED_OUT, new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.resolve(connection); @@ -828,8 +853,6 @@ export class ConnectionPool extends TypedEventEmitter { ); waitQueueMember.resolve(connection); } - - waitQueueMember.timeout.clear(); } process.nextTick(() => this.processWaitQueue()); }); diff --git a/src/collection.ts b/src/collection.ts index ccc6fe2da65..dbd91371cce 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -262,6 +262,11 @@ export class Collection { this.s.collectionHint = normalizeHintField(v); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options.timeoutMS; + } + /** * Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field, * one will be added to each of the documents missing it by the driver, mutating the document. This behavior diff --git a/src/db.ts b/src/db.ts index 53c18e44af6..6e1aa194acf 100644 --- a/src/db.ts +++ b/src/db.ts @@ -222,6 +222,11 @@ export class Db { return this.s.namespace.toString(); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options?.timeoutMS; + } + /** * Create a new collection on a server with the specified options. Use this to create capped collections. 
* More information about command options available at https://www.mongodb.com/docs/manual/reference/command/create/ @@ -272,6 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/error.ts b/src/error.ts index c9652877cb2..3f47e07d662 100644 --- a/src/error.ts +++ b/src/error.ts @@ -788,6 +788,15 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } } +/** + * @internal + */ +export class MongoOperationTimeoutError extends MongoRuntimeError { + override get name(): string { + return 'MongoOperationTimeoutError'; + } +} + /** * An error thrown when the user attempts to add options to a cursor that has already been * initialized diff --git a/src/index.ts b/src/index.ts index f68dd7699e0..ba28f50ebe6 100644 --- a/src/index.ts +++ b/src/index.ts @@ -64,6 +64,7 @@ export { MongoNetworkTimeoutError, MongoNotConnectedError, MongoOIDCError, + MongoOperationTimeoutError, MongoParseError, MongoRuntimeError, MongoServerClosedError, diff --git a/src/operations/command.ts b/src/operations/command.ts index 94ccc6ceafe..c64b4ae963a 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -65,6 +65,7 @@ export interface OperationParent { writeConcern?: WriteConcern; readPreference?: ReadPreference; bsonOptions?: BSONSerializeOptions; + timeoutMS?: number; } /** @internal */ @@ -131,6 +132,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, + timeout: this.timeout, readPreference: this.readPreference, session }; diff --git a/src/operations/find.ts b/src/operations/find.ts index a040af73bc6..0f81f2d61f2 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -116,7 +116,8 @@ export class FindOperation extends CommandOperation { ...this.options, ...this.bsonOptions, documentsReturnedIn: 'firstBatch', - session + session, + timeout: this.timeout }, this.explain ? ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/operation.ts b/src/operations/operation.ts index b51cca40201..0599b72b96d 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,6 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type Timeout } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -56,6 +57,11 @@ export abstract class AbstractOperation { options: OperationOptions; + /** @internal */ + timeout?: Timeout; + /** @internal */ + timeoutMS?: number; + [kSession]: ClientSession | undefined; static aspects?: Set; @@ -73,6 +79,8 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; + + this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index ad7d02c044f..56462fa8843 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -14,6 +14,8 @@ export type RunCommandOptions = { session?: ClientSession; /** The read preference */ readPreference?: ReadPreferenceLike; + /** @internal */ + timeoutMS?: number; } & BSONSerializeOptions; /** @internal */ @@ -39,10 +41,12 @@ export class RunCommandOperation extends AbstractOperation { { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }, this.options.responseType ); + return res; } } @@ -68,7 +72,8 @@ export class RunAdminCommandOperation extends AbstractOperation const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }); return res; } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index b4450f00727..a1b885382ec 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,7 +311,7 @@ export class Server extends TypedEventEmitter { this.incrementOperationCount(); if (conn == null) { try { - conn = await this.pool.checkOut(); + conn = await this.pool.checkOut(options); if (this.loadBalanced && isPinnableCommand(cmd, session)) { session?.pin(conn); } @@ -336,6 +336,7 @@ export class Server extends TypedEventEmitter { operationError.code === MONGODB_ERROR_CODES.Reauthenticate ) { await this.pool.reauthenticate(conn); + // TODO(NODE-5682): Implement CSOT support for socket read/write at the connection layer try { const res = await conn.command(ns, cmd, finalOptions, responseType); throwIfWriteConcernError(res); diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 73b0e92a09a..4c9d71d807d 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -24,6 +24,7 @@ import { type MongoDriverError, MongoError, MongoErrorLabel, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerSelectionError, MongoTopologyClosedError @@ -37,6 +38,7 @@ import { Timeout, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, + csotMin, type EventEmitterWithState, HostAddress, List, @@ -107,7 +109,6 @@ export interface ServerSelectionRequest { resolve: (server: Server) => void; reject: (error: MongoError) => void; [kCancelled]?: boolean; - timeout: Timeout; operationName: string; waitingLogged: boolean; previousServer?: ServerDescription; @@ -457,8 +458,14 @@ export class Topology extends TypedEventEmitter { } } + const timeoutMS = this.client.options.timeoutMS; + const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const selectServerOptions = { operationName: 'ping', ...options }; + const selectServerOptions = { + operationName: 'ping', + timeout, + ...options + }; try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), @@ -467,7 +474,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, {}); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -556,6 +563,25 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } + const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 0; + let timeout: Timeout | null; + if (options.timeout) { + // CSOT Enabled + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + timeout = options.timeout; + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } + } else { + timeout = null; + } + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } const isSharded = this.description.type === TopologyType.Sharded; const session = options.session; @@ -578,11 +604,12 @@ export class Topology extends TypedEventEmitter { ) ); } + if (timeout !== options.timeout) timeout?.clear(); return transaction.server; } const { promise: serverPromise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); + const waitQueueMember: ServerSelectionRequest = { serverSelector, topologyDescription: this.description, @@ -590,7 +617,6 @@ export class Topology extends TypedEventEmitter { transaction, resolve, reject, - timeout, startTime: now(), operationName: options.operationName, waitingLogged: false, @@ -601,14 +627,14 @@ export class Topology extends TypedEventEmitter { processWaitQueue(this); try { - return await Promise.race([serverPromise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([serverPromise, timeout]) : serverPromise); } catch (error) { if (TimeoutError.is(error)) { // Timeout waitQueueMember[kCancelled] = true; - timeout.clear(); const timeoutError = new MongoServerSelectionError( - `Server selection timed out after ${options.serverSelectionTimeoutMS} ms`, + `Server selection timed out after ${timeout?.duration} ms`, this.description ); if ( @@ -628,10 +654,17 @@ export class Topology extends TypedEventEmitter { ); } + if (options.timeout) { + throw new MongoOperationTimeoutError('Timed out during server selection', { + cause: timeoutError + }); + } throw timeoutError; } // Other server selection error throw error; + } finally { + if (timeout !== options.timeout) timeout?.clear(); } } /** @@ -889,8 +922,6 @@ function drainWaitQueue(queue: List, drainError: MongoDr continue; } - waitQueueMember.timeout.clear(); - if (!waitQueueMember[kCancelled]) { if ( waitQueueMember.mongoLogger?.willLog( @@ -944,7 +975,6 @@ function processWaitQueue(topology: Topology) { ) : serverDescriptions; } catch (selectorError) { - waitQueueMember.timeout.clear(); if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, @@ -1032,8 +1062,6 @@ function processWaitQueue(topology: Topology) { transaction.pinServer(selectedServer); } - waitQueueMember.timeout.clear(); - if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, diff --git a/src/timeout.ts b/src/timeout.ts index cd48ec385da..7af1a23f261 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -40,6 +40,16 @@ export class Timeout extends Promise { public duration: number; public timedOut = false; + get remainingTime(): number { + if (this.timedOut) return 0; + if (this.duration === 0) return Infinity; + return this.start + this.duration - Math.trunc(performance.now()); + } + + get timeElapsed(): number { + return Math.trunc(performance.now()) - this.start; + } + /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = false) { let reject!: Reject; @@ -78,6 +88,10 @@ export class Timeout extends Promise { this.id = undefined; } + throwIfExpired(): void { + if (this.timedOut) throw new TimeoutError('Timed out'); + } + public static expires(durationMS: number, unref?: boolean): Timeout { return new Timeout(undefined, durationMS, unref); } diff --git a/src/utils.ts b/src/utils.ts index 5ad754c9321..ebc0784cb1f 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -544,6 +544,10 @@ export function resolveOptions( result.readPreference = readPreference; } + const timeoutMS = options?.timeoutMS; + + result.timeoutMS = timeoutMS ?? 
parent?.timeoutMS; + return result; } @@ -1379,6 +1383,12 @@ export async function fileIsAccessible(fileName: string, mode?: number) { } } +export function csotMin(duration1: number, duration2: number): number { + if (duration1 === 0) return duration2; + if (duration2 === 0) return duration1; + return Math.min(duration1, duration2); +} + export function noop() { return; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1ed88f34d86..903ea9c3bb4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,8 +1,30 @@ /* Specification prose tests */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { + MongoClient, + MongoOperationTimeoutError, + MongoServerSelectionError, + now +} from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec prose tests', () => { - context('1. Multi-batch writes', () => { +describe('CSOT spec prose tests', function () { + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + }); + + afterEach(async function () { + await internalClient?.close(); + await client?.close(); + }); + + context.skip('1. Multi-batch writes', () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -31,7 +53,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('2. maxTimeMS is not set for commands sent to mongocryptd', () => { + context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { /** * This test MUST only be run against enterprise server versions 4.2 and higher. * @@ -42,7 +64,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('3. ClientEncryption', () => { + context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, * `LOCAL_MASTERKEY` refers to the following base64: @@ -132,7 +154,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('4. Background Connection Pooling', () => { + context.skip('4. Background Connection Pooling', () => { /** * The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication * fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait @@ -192,7 +214,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('5. Blocking Iteration Methods', () => { + context.skip('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -251,7 +273,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('6. GridFS - Upload', () => { + context.skip('6. GridFS - Upload', () => { /** Tests in this section MUST only be run against server versions 4.4 and higher. 
*/ context('uploads via openUploadStream can be timed out', () => { @@ -306,7 +328,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('7. GridFS - Download', () => { + context.skip('7. GridFS - Download', () => { /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -351,96 +373,225 @@ describe.skip('CSOT spec prose tests', () => { }); context('8. Server Selection', () => { - context('serverSelectionTimeoutMS honored if timeoutMS is not set', () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. - * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. - */ - }); + context('using sinon timer', function () { + let clock: sinon.SinonFakeTimers; + + beforeEach(function () { + clock = sinon.useFakeTimers(); + }); + + afterEach(function () { + clock.restore(); + }); - context( - "timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", - () => { + it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. + * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ - } - ); - context( - "serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", - () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. + /** NOTE: This is the original implementation of this test, but it was flaky, so was + * replaced by the current implementation using sinon fake timers + * ```ts + * client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + * const admin = client.db('test').admin(); + * const start = performance.now(); + * const maybeError = await admin.ping().then( + * () => null, + * e => e + * ); + * const end = performance.now(); + * + * expect(maybeError).to.be.instanceof(MongoServerSelectionError); + * expect(end - start).to.be.lte(15) + * ``` */ - } - ); + client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + const admin = client.db('test').admin(); + const maybeError = admin.ping().then( + () => null, + e => e + ); + + await clock.tickAsync(11); + expect(await maybeError).to.be.instanceof(MongoServerSelectionError); + }); + }); + + it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. 
+ */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); + + it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); - context('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', () => { + it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ + client = new MongoClient('mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); }); - context( - "timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. 
+ * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); - context( - "serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 20, + timeoutMS: 10 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + it.skip("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); + + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 10, + timeoutMS: 20 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context('9. endSession', () => { + context.skip('9. endSession', () => { /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -472,7 +623,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('10. 
Convenient Transactions', () => { + context.skip('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index cf9c5f736ff..c1426d8db1d 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -1,51 +1,105 @@ -/* eslint-disable @typescript-eslint/no-empty-function */ /** * The following tests are described in CSOTs spec prose tests as "unit" tests * The tests enumerated in this section could not be expressed in either spec or prose format. * Drivers SHOULD implement these if it is possible to do so using the driver's existing test infrastructure. */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec unit tests', () => { - context('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', () => {}); - - context( - 'If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', - () => {} - ); - - context( - 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', - () => {} - ); - - context( - 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - - context( - 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', - () => {} - ); +describe('CSOT spec unit tests', function () { + let client: MongoClient; + + afterEach(async function () { + sinon.restore(); + await client?.close(); + }); + + context('Server Selection and Connection Checkout', function () { + it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); + sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(Timeout.expires).to.have.been.calledWith(10000); + expect(Timeout.expires).to.not.have.been.calledWith(999999); + }); + + it('If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ timeoutMS: 
1000 }); + // Spy on connection checkout and pull options argument + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(checkoutSpy).to.have.been.calledOnce; + expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + // Check that we passed through the timeout + expect(checkoutSpy.firstCall.args[0].timeout).to.equal( + selectServerSpy.lastCall.lastArg.timeout + ); + + // Check that no more Timeouts are constructed after we enter checkout + expect(!expiresSpy.calledAfter(checkoutSpy)); + }); + + it('If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 123456 }); + + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + expect(checkoutSpy).to.have.been.calledAfter(selectServerSpy); + + expect(expiresSpy).to.have.been.calledWith(123456); + }); + + /* eslint-disable @typescript-eslint/no-empty-function */ + context.skip( + 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', + () => {} + ).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + context( + 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', + () => {} + ); + }); + + context.skip('Socket timeouts', function () { + context( + 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', + () => {} + ); + }).skipReason = + 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; + + context.skip('Client side encryption', function () { + context( + 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', + () => {} + ); + + context( + 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', + () => {} + ); + }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + + context.skip('Background Connection Pooling', function () { + context( + 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', + () => {} + ); + }).skipReason = 'TODO(NODE-6091): Implement CSOT logic for Background Connection Pooling'; + /* eslint-enable @typescript-eslint/no-empty-function */ }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b6a936afbb9..5636eb00db7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -7,7 +7,9 @@ import { type Collection, type Db, type FindCursor, - type MongoClient + LEGACY_HELLO_COMMAND, + type MongoClient, + MongoOperationTimeoutError } from '../../mongodb'; describe('CSOT driver tests', () => { @@ -94,4 +96,75 @@ 
describe('CSOT driver tests', () => { }); }); }); + + describe('autoconnect', () => { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + client = undefined; + }); + + describe('when failing autoconnect with timeoutMS defined', () => { + let configClient: MongoClient; + + beforeEach(async function () { + configClient = this.configuration.newClient(); + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + }); + + afterEach(async function () { + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + await configClient.close(); + }); + + it('throws a MongoOperationTimeoutError', { + metadata: { requires: { mongodb: '>=4.4' } }, + test: async function () { + const commandsStarted = []; + client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + + client.on('commandStarted', ev => commandsStarted.push(ev)); + + const maybeError = await client + .db('test') + .collection('test') + .insertOne({ a: 19 }) + .then( + () => null, + e => e + ); + + expect(maybeError).to.exist; + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + + expect(commandsStarted).to.have.length(0); // Ensure that we fail before we start the insertOne + } + }); + }); + }); }); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 69102e1f150..18048befab4 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -5,7 +5,7 @@ const { WaitQueueTimeoutError } = require('../../mongodb'); const mock = require('../../tools/mongodb-mock/index'); const sinon = require('sinon'); const { expect } = require('chai'); -const { setImmediate } = require('timers'); +const { setImmediate } = require('timers/promises'); const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); @@ -26,6 +26,9 @@ describe('Connection Pool', function () { options: { extendedMetadata: {} } + }, + s: { + serverSelectionTimeoutMS: 0 } } }; @@ -98,7 +101,7 @@ describe('Connection Pool', function () { pool.checkIn(conn); }); - it('should clear timed out wait queue members if no connections are available', function (done) { + it('should clear timed out wait queue members if no connections are available', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; if (isHello(doc)) { @@ -114,23 +117,15 @@ describe('Connection Pool', function () { pool.ready(); - pool.checkOut().then(conn => { - expect(conn).to.exist; - pool.checkOut().then(expect.fail, err => { - expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); - - // We can only process the wait queue with `checkIn` and `checkOut`, so we - // force the pool here to think there are no available connections, even though - // we are checking the connection back in. 
This simulates a slow leak where - // incoming requests outpace the ability of the queue to fully process cancelled - // wait queue members - sinon.stub(pool, 'availableConnectionCount').get(() => 0); - pool.checkIn(conn); - - setImmediate(() => expect(pool).property('waitQueueSize').to.equal(0)); - done(); - }); - }, expect.fail); + const conn = await pool.checkOut(); + const err = await pool.checkOut().catch(e => e); + expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); + sinon.stub(pool, 'availableConnectionCount').get(() => 0); + pool.checkIn(conn); + + await setImmediate(); + + expect(pool).property('waitQueueSize').to.equal(0); }); describe('minPoolSize population', function () { diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index c8a1406a000..f1936e2c6d0 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -108,6 +108,7 @@ const EXPECTED_EXPORTS = [ 'MongoTailableCursorError', 'MongoTopologyClosedError', 'MongoTransactionError', + 'MongoOperationTimeoutError', 'MongoUnexpectedServerResponseError', 'MongoWriteConcernError', 'WriteConcernErrorResult', From 03554042ba93dbd5c2edf43554bfeffcd5d93743 Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 10 Jun 2024 10:46:02 -0400 Subject: [PATCH 017/136] test(NODE-6120): Implement Unified test runner changes for CSOT (#4121) --- test/spec/unified-test-format/Makefile | 37 +++++- .../collectionData-createOptions.yml | 7 +- .../valid-pass/createEntities-operation.json | 74 ++++++++++++ .../valid-pass/createEntities-operation.yml | 38 ++++++ .../valid-pass/entity-cursor-iterateOnce.json | 111 ++++++++++++++++++ .../valid-pass/entity-cursor-iterateOnce.yml | 59 ++++++++++ .../valid-pass/entity-find-cursor.json | 15 ++- .../valid-pass/entity-find-cursor.yml | 6 +- ...ectedEventsForClient-ignoreExtraEvents.yml | 2 +- .../valid-pass/matches-lte-operator.json | 78 ++++++++++++ .../valid-pass/matches-lte-operator.yml | 41 +++++++ .../valid-pass/poc-change-streams.json | 36 ++++++ .../valid-pass/poc-change-streams.yml | 18 +++ .../valid-pass/poc-crud.json | 2 +- .../valid-pass/poc-crud.yml | 2 +- .../valid-pass/poc-sessions.json | 2 +- .../valid-pass/poc-sessions.yml | 3 +- .../poc-transactions-convenient-api.json | 2 +- .../poc-transactions-convenient-api.yml | 2 +- .../poc-transactions-mongos-pin-auto.json | 2 +- .../poc-transactions-mongos-pin-auto.yml | 2 +- .../valid-pass/poc-transactions.json | 6 +- .../valid-pass/poc-transactions.yml | 6 +- test/tools/unified-spec-runner/match.ts | 32 ++++- test/tools/unified-spec-runner/schema.ts | 1 + 25 files changed, 547 insertions(+), 37 deletions(-) create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.json create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.yml create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.json create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.yml diff --git a/test/spec/unified-test-format/Makefile b/test/spec/unified-test-format/Makefile index 9711d9eee0e..a2b79e3f70b 100644 --- a/test/spec/unified-test-format/Makefile +++ b/test/spec/unified-test-format/Makefile @@ -1,8 +1,8 @@ -SCHEMA=../schema-1.5.json +SCHEMA=../schema-1.21.json -.PHONY: all invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud 
collection-management sessions command-monitoring HAS_AJV +.PHONY: all invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout HAS_AJV -all: invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud collection-management sessions command-monitoring +all: invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api change-streams crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout client-side-encryption invalid: HAS_AJV @# Redirect stdout to hide expected validation errors @@ -14,6 +14,9 @@ valid-fail: HAS_AJV valid-pass: HAS_AJV @ajv test -s $(SCHEMA) -d "valid-pass/*.yml" --valid +atlas-data-lake: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../atlas-data-lake-testing/tests/unified/*.yml" --valid + versioned-api: HAS_AJV @ajv test -s $(SCHEMA) -d "../../versioned-api/tests/*.yml" --valid @@ -26,17 +29,39 @@ gridfs: HAS_AJV transactions: HAS_AJV @ajv test -s $(SCHEMA) -d "../../transactions/tests/unified/*.yml" --valid +transactions-convenient-api: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../transactions-convenient-api/tests/unified/*.yml" --valid + +change-streams: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../change-streams/tests/unified/*.yml" --valid + +client-side-operations-timeout: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-operations-timeout/tests/*.yml" --valid + crud: HAS_AJV @ajv test -s $(SCHEMA) -d "../../crud/tests/unified/*.yml" --valid collection-management: HAS_AJV @ajv test -s $(SCHEMA) -d "../../collection-management/tests/*.yml" --valid +read-write-concern: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../read-write-concern/tests/operation/*.yml" --valid + +retryable-reads: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-reads/tests/unified/*.yml" --valid + +retryable-writes: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-writes/tests/unified/*.yml" --valid + sessions: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../sessions/tests/unified/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../sessions/tests/*.yml" --valid + +command-logging-and-monitoring: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/logging/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/monitoring/*.yml" --valid -command-monitoring: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../command-monitoring/tests/unified/*.yml" --valid +client-side-encryption: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-encryption/tests/unified/*.yml" --valid HAS_AJV: @if ! command -v ajv > /dev/null; then \ diff --git a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml index 3b1c0c3a412..c6afedcfa96 100644 --- a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml +++ b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml @@ -1,12 +1,9 @@ description: collectionData-createOptions - schemaVersion: "1.9" - runOnRequirements: - minServerVersion: "3.6" # Capped collections cannot be created on serverless instances. 
serverless: forbid - createEntities: - client: id: &client0 client0 @@ -18,7 +15,6 @@ createEntities: id: &collection0 collection0 database: *database0 collectionName: &collection0Name coll0 - initialData: - collectionName: *collection0Name databaseName: *database0Name @@ -28,7 +24,6 @@ initialData: size: &cappedSize 4096 documents: - { _id: 1, x: 11 } - tests: - description: collection is created with the correct options operations: @@ -39,4 +34,4 @@ tests: - $collStats: { storageStats: {} } - $project: { capped: '$storageStats.capped', maxSize: '$storageStats.maxSize'} expectResult: - - { capped: true, maxSize: *cappedSize } + - { capped: true, maxSize: *cappedSize } \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.json b/test/spec/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 00000000000..3fde42919d7 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.yml b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml new file mode 100644 index 00000000000..ee8acd73687 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml @@ -0,0 +1,38 @@ +description: createEntities-operation + +# Note: createEntities is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +tests: + - description: createEntities operation + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client1 client1 + observeEvents: [ commandStartedEvent ] + - database: + id: &database1 database1 + client: *client1 + databaseName: &database1Name database1 + - collection: + id: &collection1 collection1 + database: *database1 + collectionName: &collection1Name coll1 + - name: deleteOne + object: *collection1 + arguments: + filter: { _id : 1 } + expectEvents: + - client: *client1 + events: + - commandStartedEvent: + command: + delete: *collection1Name + deletes: + - q: { _id: 1 } + limit: 1 + commandName: delete + databaseName: *database1Name diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 00000000000..b17ae78b942 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,111 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml new file mode 100644 index 00000000000..508e594a538 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml @@ -0,0 +1,59 @@ +description: entity-cursor-iterateOnce + +# Note: iterateOnce is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - _id: 1 + - _id: 2 + - _id: 3 + +tests: + - description: iterateOnce + operations: + - name: createFindCursor + object: *collection0 + arguments: + filter: {} + batchSize: 2 + saveResultAsEntity: &cursor0 cursor0 + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 1 } + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 2 } + # This operation could be iterateUntilDocumentOrError, but we use iterateOne to ensure that drivers support it. + - name: iterateOnce + object: *cursor0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: {} + batchSize: 2 + commandName: find + databaseName: *database0Name + - commandStartedEvent: + command: + getMore: { $$type: [ int, long ] } + collection: *collection0Name + commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f3..6f955d81f4a 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json @@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml index 61c9f8835ac..3ecdf6da1df 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml @@ -61,19 +61,19 @@ tests: - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } firstBatch: { $$type: array } commandName: find - commandStartedEvent: command: - getMore: { $$type: long } + getMore: { $$type: [ int, long ] } collection: *collection0Name commandName: getMore - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } nextBatch: { $$type: array } commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml index 162d0e3c046..d6d87094f64 100644 --- a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml +++ b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml @@ -75,4 +75,4 @@ tests: insert: *collection0Name documents: - *insertDocument4 - commandName: insert + commandName: insert \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.json 
b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 00000000000..4de65c58387 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml new file mode 100644 index 00000000000..4bec571f029 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml @@ -0,0 +1,41 @@ +description: matches-lte-operator + +# Note: $$lte is not technically in the 1.8 schema but was introduced at the same time. +schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0Name + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +tests: + - description: special lte matching operator + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id : 1, y: 1 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection0Name + documents: + # We can make exact assertions here but we use the $$lte operator to ensure drivers support it. 
+ - { _id: { $$lte: 1 }, y: { $$lte: 2 } } + commandName: insert + databaseName: *database0Name diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.json b/test/spec/unified-test-format/valid-pass/poc-change-streams.json index 4194005eb41..50f0d06f08d 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.json @@ -94,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml index b066cf0b89a..a7daafceb77 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml @@ -59,6 +59,24 @@ initialData: documents: [] tests: + - description: "saveResultAsEntity is optional for createChangeStream" + runOnRequirements: + - minServerVersion: "3.8.0" + topologies: [ replicaset ] + operations: + - name: createChangeStream + object: *client0 + arguments: + pipeline: [] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + aggregate: 1 + commandName: aggregate + databaseName: admin + - description: "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster." 
runOnRequirements: - minServerVersion: "3.8.0" diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.json b/test/spec/unified-test-format/valid-pass/poc-crud.json index 0790d9b789f..94e4ec56829 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.json +++ b/test/spec/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.yml b/test/spec/unified-test-format/valid-pass/poc-crud.yml index b7d05d75afb..5748c0779f8 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.yml +++ b/test/spec/unified-test-format/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: aggregate diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.json b/test/spec/unified-test-format/valid-pass/poc-sessions.json index 75f34894286..117c9e7d009 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.json +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.yml b/test/spec/unified-test-format/valid-pass/poc-sessions.yml index cb16657da3f..20902583286 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.yml @@ -124,12 +124,11 @@ tests: - description: "Dirty explicit session is discarded" # Original test specified retryWrites=true, but that is now the default. - # Retryable writes will require a sharded-replicaset, though. 
runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed659276..9ab44a9c548 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml index 4f981d15dd4..94fadda0aa5 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a5..de08edec442 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml index 47db7c3188a..33cd2a25214 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.json b/test/spec/unified-test-format/valid-pass/poc-transactions.json index 0355ca20605..2055a3b7057 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.yml b/test/spec/unified-test-format/valid-pass/poc-transactions.yml index 0a66b9bd7f6..8a12c8b39ac 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: @@ -51,7 +51,7 @@ tests: - description: "explicitly create 
collection using create command" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 @@ -109,7 +109,7 @@ tests: - description: "create index on a non-existing collection" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index f92004c7760..7b2668e88a0 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -24,6 +24,7 @@ import { Long, MongoBulkWriteError, MongoError, + MongoOperationTimeoutError, MongoServerError, ObjectId, type OneOrMore, @@ -97,6 +98,19 @@ export function isMatchAsRootOperator(value: unknown): value is MatchAsRootOpera return typeof value === 'object' && value != null && '$$matchAsRoot' in value; } +export interface LteOperator { + $$lte: number; +} + +export function isLteOperator(value: unknown): value is LteOperator { + return ( + typeof value === 'object' && + value != null && + '$$lte' in value && + typeof value['$$lte'] === 'number' + ); +} + export const SpecialOperatorKeys = [ '$$exists', '$$type', @@ -105,7 +119,8 @@ export const SpecialOperatorKeys = [ '$$matchAsRoot', '$$matchAsDocument', '$$unsetOrMatches', - '$$sessionLsid' + '$$sessionLsid', + '$$lte' ]; export type SpecialOperator = @@ -116,7 +131,8 @@ export type SpecialOperator = | UnsetOrMatchesOperator | SessionLsidOperator | MatchAsDocumentOperator - | MatchAsRootOperator; + | MatchAsRootOperator + | LteOperator; type KeysOfUnion = T extends object ? keyof T : never; export type SpecialOperatorKey = KeysOfUnion; @@ -129,7 +145,8 @@ export function isSpecialOperator(value: unknown): value is SpecialOperator { isUnsetOrMatchesOperator(value) || isSessionLsidOperator(value) || isMatchAsRootOperator(value) || - isMatchAsDocumentOperator(value) + isMatchAsDocumentOperator(value) || + isLteOperator(value) ); } @@ -389,6 +406,9 @@ export function specialCheck( ); resultCheck(actual, expected.$$matchAsRoot as any, entities, path, false); + } else if (isLteOperator(expected)) { + expect(typeof actual).to.equal('number'); + expect(actual).to.be.lte(expected.$$lte); } else { expect.fail(`Unknown special operator: ${JSON.stringify(expected)}`); } @@ -758,6 +778,12 @@ export function expectErrorCheck( } } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/schema.ts b/test/tools/unified-spec-runner/schema.ts index 81b81724632..ce722b2e706 100644 --- a/test/tools/unified-spec-runner/schema.ts +++ b/test/tools/unified-spec-runner/schema.ts @@ -386,6 +386,7 @@ export interface StoreEventsAsEntity { } export interface ExpectedError { isError?: true; + isTimeoutError?: boolean; isClientError?: boolean; errorContains?: string; errorCode?: number; From 5ef3d690d4e0282868b55eca2f0bc9e47bb1cfca Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 21 Jun 2024 12:06:30 -0400 Subject: [PATCH 018/136] refactor(NODE-6187): refactor to use TimeoutContext 
abstraction (#4131) --- src/bulk/common.ts | 4 + src/cmap/connection.ts | 4 +- src/cmap/connection_pool.ts | 39 +--- src/index.ts | 18 +- src/operations/aggregate.ts | 5 +- src/operations/bulk_write.ts | 11 +- src/operations/command.ts | 8 +- src/operations/count.ts | 9 +- src/operations/create_collection.ts | 18 +- src/operations/delete.ts | 21 +- src/operations/distinct.ts | 9 +- src/operations/drop.ts | 24 ++- src/operations/estimated_document_count.ts | 9 +- src/operations/execute_operation.ts | 16 +- src/operations/find.ts | 6 +- src/operations/find_and_modify.ts | 9 +- src/operations/get_more.ts | 5 +- src/operations/indexes.ts | 22 +- src/operations/insert.ts | 19 +- src/operations/kill_cursors.ts | 12 +- src/operations/list_collections.ts | 5 +- src/operations/list_databases.ts | 11 +- src/operations/operation.ts | 10 +- src/operations/profiling_level.ts | 9 +- src/operations/remove_user.ts | 9 +- src/operations/rename.ts | 9 +- src/operations/run_command.ts | 17 +- src/operations/search_indexes/create.ts | 12 +- src/operations/search_indexes/drop.ts | 9 +- src/operations/search_indexes/update.ts | 9 +- src/operations/set_profiling_level.ts | 6 +- src/operations/stats.ts | 9 +- src/operations/update.ts | 24 ++- src/operations/validate_collection.ts | 9 +- src/sdam/server.ts | 12 +- src/sdam/topology.ts | 55 +++-- src/timeout.ts | 166 +++++++++++++- ...lient_side_operations_timeout.unit.test.ts | 12 +- .../node_csot.test.ts | 2 +- test/tools/cmap_spec_runner.ts | 12 +- test/unit/cmap/connection_pool.test.js | 22 +- test/unit/error.test.ts | 19 +- test/unit/operations/get_more.test.ts | 2 +- test/unit/sdam/topology.test.ts | 76 +++++-- test/unit/timeout.test.ts | 204 +++++++++++++++++- 45 files changed, 796 insertions(+), 202 deletions(-) diff --git a/src/bulk/common.ts b/src/bulk/common.ts index c133a57d227..9eb63382443 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -20,6 +20,7 @@ import { makeUpdateStatement, UpdateOperation, type UpdateStatement } from '../o import type { Server } from '../sdam/server'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { applyRetryableWrites, type Callback, @@ -873,6 +874,9 @@ export interface BulkWriteOptions extends CommandOperationOptions { forceServerObjectId?: boolean; /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). 
*/ let?: Document; + + /** @internal */ + timeoutContext?: TimeoutContext; } const executeCommandsAsync = promisify(executeCommands); diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 445967faa5b..e88e784b457 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,7 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type TimeoutContext } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -97,7 +97,7 @@ export interface CommandOptions extends BSONSerializeOptions { directConnection?: boolean; /** @internal */ - timeout?: Timeout; + timeoutContext?: TimeoutContext; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 79440db1e06..5369cc155aa 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -27,8 +27,8 @@ import { } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; -import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type TimeoutContext, TimeoutError } from '../timeout'; +import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -355,41 +355,15 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
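 *
 * The checkout wait is raced against the supplied timeoutContext's
 * connectionCheckoutTimeout (when one is present). If that timeout fires and the
 * context has CSOT enabled (csotEnabled()), the failure is surfaced as a
 * MongoOperationTimeoutError wrapping the wait-queue timeout error; otherwise the
 * ordinary wait-queue timeout error is thrown as before. The timeout is cleared on
 * exit only when the context's clearConnectionCheckoutTimeout flag is set.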
*/ - async checkOut(options?: { timeout?: Timeout }): Promise { + async checkOut(options: { timeoutContext: TimeoutContext }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); - const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; - const { promise, resolve, reject } = promiseWithResolvers(); - let timeout: Timeout | null = null; - if (options?.timeout) { - // CSOT enabled - // Determine if we're using the timeout passed in or a new timeout - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - // This check determines whether or not Topology.selectServer used the configured - // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - // server selection used `timeoutMS`, so we should use the existing timeout as the timeout - // here - timeout = options.timeout; - } else { - // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with - // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut - // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking - timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); - } - } - } else { - timeout = Timeout.expires(waitQueueTimeoutMS); - } + const timeout = options.timeoutContext.connectionCheckoutTimeout; const waitQueueMember: WaitQueueMember = { resolve, @@ -404,6 +378,7 @@ export class ConnectionPool extends TypedEventEmitter { return await (timeout ? Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { + timeout?.clear(); waitQueueMember[kCancelled] = true; this.emitAndLog( @@ -416,7 +391,7 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); - if (options?.timeout) { + if (options.timeoutContext.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during connection checkout', { cause: timeoutError }); @@ -425,7 +400,7 @@ export class ConnectionPool extends TypedEventEmitter { } throw error; } finally { - if (timeout !== options?.timeout) timeout?.clear(); + if (options.timeoutContext.clearConnectionCheckoutTimeout) timeout?.clear(); } } diff --git a/src/index.ts b/src/index.ts index ba28f50ebe6..6ddcc887bc3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -563,7 +563,13 @@ export type { RTTSampler, ServerMonitoringMode } from './sdam/monitor'; -export type { Server, ServerEvents, ServerOptions, ServerPrivate } from './sdam/server'; +export type { + Server, + ServerCommandOptions, + ServerEvents, + ServerOptions, + ServerPrivate +} from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, @@ -594,7 +600,15 @@ export type { WithTransactionCallback } from './sessions'; export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort'; -export type { Timeout } from './timeout'; +export type { + CSOTTimeoutContext, + CSOTTimeoutContextOptions, + LegacyTimeoutContext, + LegacyTimeoutContextOptions, + Timeout, + TimeoutContext, + TimeoutContextOptions +} from './timeout'; export type { Transaction, TransactionOptions, TxnState } from './transactions'; export type { BufferPool, diff --git a/src/operations/aggregate.ts 
b/src/operations/aggregate.ts index a5a267ac3e4..50494cbba73 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -3,6 +3,7 @@ import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/r import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -97,7 +98,8 @@ export class AggregateOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options: AggregateOptions = this.options; const serverWireVersion = maxWireVersion(server); @@ -142,6 +144,7 @@ export class AggregateOperation extends CommandOperation { server, session, command, + timeoutContext, this.explain ? ExplainedCursorResponse : CursorResponse ); } diff --git a/src/operations/bulk_write.ts b/src/operations/bulk_write.ts index 0a855644f06..55b61ef73b0 100644 --- a/src/operations/bulk_write.ts +++ b/src/operations/bulk_write.ts @@ -7,6 +7,7 @@ import type { import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { AbstractOperation, Aspect, defineAspects } from './operation'; /** @internal */ @@ -32,11 +33,17 @@ export class BulkWriteOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const operations = this.operations; - const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; + const options = { + ...this.options, + ...this.bsonOptions, + readPreference: this.readPreference, + timeoutContext + }; // Create the bulk operation const bulk: BulkOperationBase = diff --git a/src/operations/command.ts b/src/operations/command.ts index c64b4ae963a..5bd80f796d1 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -7,6 +7,7 @@ import type { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { commandSupportsReadConcern, decorateWithExplain, @@ -112,19 +113,22 @@ export abstract class CommandOperation extends AbstractOperation { server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType: T | undefined ): Promise>; public async executeCommand( server: Server, session: ClientSession | undefined, - cmd: Document + cmd: Document, + timeoutContext: TimeoutContext ): Promise; async executeCommand( server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType?: MongoDBResponseConstructor ): Promise { this.server = server; @@ -132,7 +136,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, - timeout: this.timeout, + timeoutContext, 
readPreference: this.readPreference, session }; diff --git a/src/operations/count.ts b/src/operations/count.ts index 00aae501728..82330a11e76 100644 --- a/src/operations/count.ts +++ b/src/operations/count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -36,7 +37,11 @@ export class CountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const cmd: Document = { count: this.collectionName, @@ -59,7 +64,7 @@ export class CountOperation extends CommandOperation { cmd.maxTimeMS = options.maxTimeMS; } - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return result ? result.n : 0; } } diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index 8edc7e9a1c4..afb2680b9a0 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -9,6 +9,7 @@ import { MongoCompatibilityError } from '../error'; import type { PkFactory } from '../mongo_client'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { CreateIndexesOperation } from './indexes'; import { Aspect, defineAspects } from './operation'; @@ -124,7 +125,11 @@ export class CreateCollectionOperation extends CommandOperation { return 'create' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const name = this.name; const options = this.options; @@ -155,7 +160,7 @@ export class CreateCollectionOperation extends CommandOperation { unique: true } }); - await createOp.executeWithoutEncryptedFieldsCheck(server, session); + await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } if (!options.encryptedFields) { @@ -163,7 +168,7 @@ export class CreateCollectionOperation extends CommandOperation { } } - const coll = await this.executeWithoutEncryptedFieldsCheck(server, session); + const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); if (encryptedFields) { // Create the required index for queryable encryption support. 
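// A minimal sketch of the TimeoutContext abstraction that these operations now thread
// through execute()/executeCommand(). The shape below is inferred from the call sites in
// this patch (TimeoutContext.create in execute_operation.ts; csotEnabled,
// connectionCheckoutTimeout and clearConnectionCheckoutTimeout in connection_pool.ts);
// the concrete classes live in src/timeout.ts, which this excerpt does not reproduce, so
// treat this as illustrative rather than the actual implementation.
//
//   interface TimeoutContextOptions {
//     timeoutMS?: number;
//     serverSelectionTimeoutMS: number;
//     waitQueueTimeoutMS: number;
//   }
//
//   declare abstract class TimeoutContext {
//     // Presumably returns a CSOTTimeoutContext when timeoutMS is set and a
//     // LegacyTimeoutContext otherwise (both variants are exported from src/index.ts).
//     static create(options: TimeoutContextOptions): TimeoutContext;
//     abstract csotEnabled(): boolean;
//     abstract get connectionCheckoutTimeout(): Timeout | null;
//     abstract get clearConnectionCheckoutTimeout(): boolean;
//   }
//
//   // Typical flow, mirroring executeOperation() and the operation classes in this patch:
//   const timeoutContext = TimeoutContext.create({
//     serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS,
//     waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS,
//     timeoutMS: operation.options.timeoutMS
//   });
//   const result = await operation.execute(server, session, timeoutContext);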
@@ -173,7 +178,7 @@ export class CreateCollectionOperation extends CommandOperation { { __safeContent__: 1 }, {} ); - await createIndexOp.execute(server, session); + await createIndexOp.execute(server, session, timeoutContext); } return coll; @@ -181,7 +186,8 @@ export class CreateCollectionOperation extends CommandOperation { private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const db = this.db; const name = this.name; @@ -198,7 +204,7 @@ export class CreateCollectionOperation extends CommandOperation { } } // otherwise just execute the command - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); return new Collection(db, name, options); } } diff --git a/src/operations/delete.ts b/src/operations/delete.ts index f0ef61cb7b1..0e93ead36a2 100644 --- a/src/operations/delete.ts +++ b/src/operations/delete.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoServerError } from '../error'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace } from '../utils'; import { type WriteConcernOptions } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -67,7 +68,8 @@ export class DeleteOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; @@ -95,7 +97,12 @@ export class DeleteOperation extends CommandOperation { } } - const res: TODO_NODE_3286 = await super.executeCommand(server, session, command); + const res: TODO_NODE_3286 = await super.executeCommand( + server, + session, + command, + timeoutContext + ); return res; } } @@ -107,9 +114,10 @@ export class DeleteOneOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -127,9 +135,10 @@ export class DeleteManyOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/distinct.ts b/src/operations/distinct.ts index 4fda285d880..51f2a362d8c 100644 --- a/src/operations/distinct.ts +++ b/src/operations/distinct.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, decorateWithReadConcern } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -42,7 +43,11 @@ export class DistinctOperation extends CommandOperation { return 'distinct' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const key = this.key; const query = this.query; @@ -72,7 +77,7 @@ export class DistinctOperation extends CommandOperation { // Have we specified collation decorateWithCollation(cmd, coll, options); - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return this.explain ? 
result : result.values; } diff --git a/src/operations/drop.ts b/src/operations/drop.ts index 15624d4c07b..787bb6e7d0f 100644 --- a/src/operations/drop.ts +++ b/src/operations/drop.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { MONGODB_ERROR_CODES, MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class DropCollectionOperation extends CommandOperation { return 'drop' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const options = this.options; const name = this.name; @@ -57,7 +62,7 @@ export class DropCollectionOperation extends CommandOperation { // Drop auxilliary collections, ignoring potential NamespaceNotFound errors. const dropOp = new DropCollectionOperation(db, collectionName); try { - await dropOp.executeWithoutEncryptedFieldsCheck(server, session); + await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } catch (err) { if ( !(err instanceof MongoServerError) || @@ -69,14 +74,15 @@ export class DropCollectionOperation extends CommandOperation { } } - return await this.executeWithoutEncryptedFieldsCheck(server, session); + return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - await super.executeCommand(server, session, { drop: this.name }); + await super.executeCommand(server, session, { drop: this.name }, timeoutContext); return true; } } @@ -96,8 +102,12 @@ export class DropDatabaseOperation extends CommandOperation { return 'dropDatabase' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropDatabase: 1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext); return true; } } diff --git a/src/operations/estimated_document_count.ts b/src/operations/estimated_document_count.ts index c1d6c381998..5ab5aa4c305 100644 --- a/src/operations/estimated_document_count.ts +++ b/src/operations/estimated_document_count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -30,7 +31,11 @@ export class EstimatedDocumentCountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd: Document = { count: this.collectionName }; if (typeof 
this.options.maxTimeMS === 'number') { @@ -43,7 +48,7 @@ export class EstimatedDocumentCountOperation extends CommandOperation { cmd.comment = this.options.comment; } - const response = await super.executeCommand(server, session, cmd); + const response = await super.executeCommand(server, session, cmd, timeoutContext); return response?.n || 0; } diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 97e60450739..39937c8abf4 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -24,7 +24,8 @@ import { } from '../sdam/server_selection'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; -import { supportsRetryableWrites } from '../utils'; +import { TimeoutContext } from '../timeout'; +import { squashError, supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -57,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -86,6 +87,12 @@ export async function executeOperation< ); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + const readPreference = operation.readPreference ?? ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -109,7 +116,8 @@ export async function executeOperation< return await tryOperation(operation, { topology, session, - readPreference + readPreference, + timeoutContext }); } finally { if (session?.owner != null && session.owner === owner) { @@ -260,7 +268,7 @@ async function tryOperation< } try { - return await operation.execute(server, session); + return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; diff --git a/src/operations/find.ts b/src/operations/find.ts index 0f81f2d61f2..5f359324d56 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -5,6 +5,7 @@ import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithExplain, type MongoDBNamespace, normalizeHintField } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -98,7 +99,8 @@ export class FindOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { this.server = server; @@ -117,7 +119,7 @@ export class FindOperation extends CommandOperation { ...this.bsonOptions, documentsReturnedIn: 'firstBatch', session, - timeout: this.timeout + timeoutContext }, this.explain ? 
ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/find_and_modify.ts b/src/operations/find_and_modify.ts index 92b17e93b3b..651bcccb626 100644 --- a/src/operations/find_and_modify.ts +++ b/src/operations/find_and_modify.ts @@ -5,6 +5,7 @@ import { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortForCmd } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils'; import { type WriteConcern, type WriteConcernSettings } from '../write_concern'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -180,7 +181,11 @@ export class FindAndModifyOperation extends CommandOperation { return 'findAndModify' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const query = this.query; const options = { ...this.options, ...this.bsonOptions }; @@ -208,7 +213,7 @@ export class FindAndModifyOperation extends CommandOperation { } // Execute the command - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return options.includeResultMetadata ? result : (result.value ?? null); } } diff --git a/src/operations/get_more.ts b/src/operations/get_more.ts index aa550721b6f..34317d533b5 100644 --- a/src/operations/get_more.ts +++ b/src/operations/get_more.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -58,7 +59,8 @@ export class GetMoreOperation extends AbstractOperation { */ override async execute( server: Server, - _session: ClientSession | undefined + _session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Getmore must run on the same server operation began on'); @@ -97,6 +99,7 @@ export class GetMoreOperation extends AbstractOperation { const commandOptions = { returnFieldSelector: null, documentsReturnedIn: 'nextBatch', + timeoutContext, ...this.options }; diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index fda3fa80dd6..c96a5d73453 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -6,6 +6,7 @@ import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils'; import { type CollationOptions, @@ -296,7 +297,11 @@ export class CreateIndexesOperation extends CommandOperation { return 'createIndexes'; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + 
timeoutContext: TimeoutContext + ): Promise { const options = this.options; const indexes = this.indexes; @@ -316,7 +321,7 @@ export class CreateIndexesOperation extends CommandOperation { // collation is set on each index, it should not be defined at the root this.options.collation = undefined; - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); const indexNames = indexes.map(index => index.name || ''); return indexNames; @@ -344,9 +349,13 @@ export class DropIndexOperation extends CommandOperation { return 'dropIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName }; - return await super.executeCommand(server, session, cmd); + return await super.executeCommand(server, session, cmd, timeoutContext); } } @@ -379,7 +388,8 @@ export class ListIndexesOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const serverWireVersion = maxWireVersion(server); @@ -393,7 +403,7 @@ export class ListIndexesOperation extends CommandOperation { command.comment = this.options.comment; } - return await super.executeCommand(server, session, command, CursorResponse); + return await super.executeCommand(server, session, command, timeoutContext, CursorResponse); } } diff --git a/src/operations/insert.ts b/src/operations/insert.ts index 35a050ed1ca..1a40763e313 100644 --- a/src/operations/insert.ts +++ b/src/operations/insert.ts @@ -5,6 +5,7 @@ import { MongoInvalidArgumentError, MongoServerError } from '../error'; import type { InferIdType } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { BulkWriteOperation } from './bulk_write'; @@ -27,7 +28,11 @@ export class InsertOperation extends CommandOperation { return 'insert' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -46,7 +51,7 @@ export class InsertOperation extends CommandOperation { command.comment = options.comment; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } @@ -73,9 +78,10 @@ export class InsertOneOperation extends InsertOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res = await super.execute(server, session); + const res = await super.execute(server, session, timeoutContext); if (res.code) throw new MongoServerError(res); if (res.writeErrors) { // This should be a WriteError but we can't change it now because of error hierarchy @@ -123,7 +129,8 @@ export class InsertManyOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; @@ -137,7 +144,7 @@ export class InsertManyOperation extends AbstractOperation { ); try { - const res = await bulkWriteOperation.execute(server, session); + const res = await bulkWriteOperation.execute(server, session, timeoutContext); return { acknowledged: writeConcern?.w !== 0, insertedCount: res.insertedCount, diff --git a/src/operations/kill_cursors.ts b/src/operations/kill_cursors.ts index 356230e9c7a..72c6a04b276 100644 --- a/src/operations/kill_cursors.ts +++ b/src/operations/kill_cursors.ts @@ -2,6 +2,7 @@ import type { Long } from '../bson'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -29,7 +30,11 @@ export class KillCursorsOperation extends AbstractOperation { return 'killCursors' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Killcursor must run on the same server operation began on'); } @@ -46,7 +51,10 @@ export class KillCursorsOperation extends AbstractOperation { cursors: [this.cursorId] }; try { - await server.command(this.ns, killCursorsCommand, { session }); + await server.command(this.ns, killCursorsCommand, { + session, + timeoutContext + }); } catch (error) { // The driver should never emit errors from killCursors, this is spec-ed behavior squashError(error); diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index e94300f1205..702db0fe3f2 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, 
defineAspects } from './operation'; @@ -54,12 +55,14 @@ export class ListCollectionsOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { return await super.executeCommand( server, session, this.generateCommand(maxWireVersion(server)), + timeoutContext, CursorResponse ); } diff --git a/src/operations/list_databases.ts b/src/operations/list_databases.ts index 5ad9142a1a7..bd740d50c68 100644 --- a/src/operations/list_databases.ts +++ b/src/operations/list_databases.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -41,7 +42,8 @@ export class ListDatabasesOperation extends CommandOperation { const cmd: Document = { listDatabases: 1 }; @@ -63,7 +65,12 @@ export class ListDatabasesOperation extends CommandOperation); + return await (super.executeCommand( + server, + session, + cmd, + timeoutContext + ) as Promise); } } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 0599b72b96d..97e12871ee2 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,7 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type Timeout, type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -79,15 +79,17 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; - - this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
Command name should be stateless (should not use 'this' keyword) */ abstract get commandName(): string; - abstract execute(server: Server, session: ClientSession | undefined): Promise; + abstract execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise; hasAspect(aspect: symbol): boolean { const ctor = this.constructor as { aspects?: Set }; diff --git a/src/operations/profiling_level.ts b/src/operations/profiling_level.ts index 383062c2a40..7c860a244b7 100644 --- a/src/operations/profiling_level.ts +++ b/src/operations/profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -20,8 +21,12 @@ export class ProfilingLevelOperation extends CommandOperation { return 'profile' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - const doc = await super.executeCommand(server, session, { profile: -1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + const doc = await super.executeCommand(server, session, { profile: -1 }, timeoutContext); if (doc.ok === 1) { const was = doc.was; if (was === 0) return 'off'; diff --git a/src/operations/remove_user.ts b/src/operations/remove_user.ts index ced8e4e1cab..7f484ba89a3 100644 --- a/src/operations/remove_user.ts +++ b/src/operations/remove_user.ts @@ -1,6 +1,7 @@ import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -22,8 +23,12 @@ export class RemoveUserOperation extends CommandOperation { return 'dropUser' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropUser: this.username }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropUser: this.username }, timeoutContext); return true; } } diff --git a/src/operations/rename.ts b/src/operations/rename.ts index a27d4afe45a..883be282b64 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class RenameOperation extends CommandOperation { return 'renameCollection' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { // Build the command const renameCollection = this.collection.namespace; const toCollection = 
this.collection.s.namespace.withCollection(this.newName).toString(); @@ -42,7 +47,7 @@ export class RenameOperation extends CommandOperation { dropTarget: dropTarget }; - await super.executeCommand(server, session, command); + await super.executeCommand(server, session, command, timeoutContext); return new Collection(this.collection.s.db, this.newName, this.collection.s.options); } } diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index 56462fa8843..b91e2d0344e 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -5,6 +5,7 @@ import { type TODO_NODE_3286 } from '../mongo_types'; import type { ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { AbstractOperation } from './operation'; @@ -33,7 +34,11 @@ export class RunCommandOperation extends AbstractOperation { return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command( this.ns, @@ -42,7 +47,7 @@ export class RunCommandOperation extends AbstractOperation { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }, this.options.responseType ); @@ -67,13 +72,17 @@ export class RunAdminCommandOperation extends AbstractOperation return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }); return res; } diff --git a/src/operations/search_indexes/create.ts b/src/operations/search_indexes/create.ts index 7e5e55d18d6..9661026e3eb 100644 --- a/src/operations/search_indexes/create.ts +++ b/src/operations/search_indexes/create.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @@ -32,14 +33,21 @@ export class CreateSearchIndexesOperation extends AbstractOperation { return 'createSearchIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { createSearchIndexes: namespace.collection, indexes: this.descriptions }; - const res = await server.command(namespace, command, { session }); + const res = await server.command(namespace, command, { + session, + timeoutContext + }); const indexesCreated: Array<{ name: string }> = res?.indexesCreated ?? 
[]; return indexesCreated.map(({ name }) => name); diff --git a/src/operations/search_indexes/drop.ts b/src/operations/search_indexes/drop.ts index 4e287cca012..e9ea0ad01ce 100644 --- a/src/operations/search_indexes/drop.ts +++ b/src/operations/search_indexes/drop.ts @@ -4,6 +4,7 @@ import type { Collection } from '../../collection'; import { MONGODB_ERROR_CODES, MongoServerError } from '../../error'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class DropSearchIndexOperation extends AbstractOperation { return 'dropSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command: Document = { @@ -31,7 +36,7 @@ export class DropSearchIndexOperation extends AbstractOperation { } try { - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); } catch (error) { const isNamespaceNotFoundError = error instanceof MongoServerError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound; diff --git a/src/operations/search_indexes/update.ts b/src/operations/search_indexes/update.ts index aad7f93536c..e88e777d675 100644 --- a/src/operations/search_indexes/update.ts +++ b/src/operations/search_indexes/update.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class UpdateSearchIndexOperation extends AbstractOperation { return 'updateSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { updateSearchIndex: namespace.collection, @@ -27,7 +32,7 @@ export class UpdateSearchIndexOperation extends AbstractOperation { definition: this.definition }; - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); return; } } diff --git a/src/operations/set_profiling_level.ts b/src/operations/set_profiling_level.ts index 9969b2ea3c2..d76473f2632 100644 --- a/src/operations/set_profiling_level.ts +++ b/src/operations/set_profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { enumToString } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -53,7 +54,8 @@ export class SetProfilingLevelOperation extends CommandOperation override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const level = this.level; @@ -64,7 +66,7 @@ export 
class SetProfilingLevelOperation extends CommandOperation } // TODO(NODE-3483): Determine error to put here - await super.executeCommand(server, session, { profile: this.profile }); + await super.executeCommand(server, session, { profile: this.profile }, timeoutContext); return level; } } diff --git a/src/operations/stats.ts b/src/operations/stats.ts index 41c9faf6e24..aafd3bf1bac 100644 --- a/src/operations/stats.ts +++ b/src/operations/stats.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -24,13 +25,17 @@ export class DbStatsOperation extends CommandOperation { return 'dbStats' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const command: Document = { dbStats: true }; if (this.options.scale != null) { command.scale = this.options.scale; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } diff --git a/src/operations/update.ts b/src/operations/update.ts index ba0ad6d95ff..5b6f396afec 100644 --- a/src/operations/update.ts +++ b/src/operations/update.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoInvalidArgumentError, MongoServerError } import type { InferIdType, TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { hasAtomicOperators, type MongoDBNamespace } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -91,7 +92,11 @@ export class UpdateOperation extends CommandOperation { return this.statements.every(op => op.multi == null || op.multi === false); } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -122,7 +127,7 @@ export class UpdateOperation extends CommandOperation { } } - const res = await super.executeCommand(server, session, command); + const res = await super.executeCommand(server, session, command, timeoutContext); return res; } } @@ -143,9 +148,10 @@ export class UpdateOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -177,9 +183,10 @@ export class UpdateManyOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -230,9 +237,10 @@ export class ReplaceOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/validate_collection.ts b/src/operations/validate_collection.ts index 4880a703a7a..16ae4cad9e0 100644 --- a/src/operations/validate_collection.ts +++ b/src/operations/validate_collection.ts @@ -3,6 +3,7 @@ import type { Document } from '../bson'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -37,10 +38,14 @@ export class ValidateCollectionOperation extends CommandOperation { return 'validate' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const collectionName = this.collectionName; - const doc = await super.executeCommand(server, session, this.command); + const doc = await super.executeCommand(server, session, this.command, timeoutContext); if (doc.result != null && typeof doc.result !== 'string') throw new MongoUnexpectedServerResponseError('Error with validation data'); if (doc.result != null && doc.result.match(/exception|corrupt/) != null) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index a1b885382ec..20cb13423c4 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -40,6 +40,7 @@ import type { ServerApi } from '../mongo_client'; import { TypedEventEmitter } from '../mongo_types'; import type { GetMoreOptions 
} from '../operations/get_more'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isTransactionCommand } from '../transactions'; import { type EventEmitterWithState, @@ -104,6 +105,11 @@ export type ServerEvents = { } & ConnectionPoolEvents & EventEmitterWithState; +/** @internal */ +export type ServerCommandOptions = Omit & { + timeoutContext: TimeoutContext; +}; + /** @internal */ export class Server extends TypedEventEmitter { /** @internal */ @@ -267,20 +273,20 @@ export class Server extends TypedEventEmitter { public async command( ns: MongoDBNamespace, command: Document, - options: CommandOptions | undefined, + options: ServerCommandOptions, responseType: T | undefined ): Promise>; public async command( ns: MongoDBNamespace, command: Document, - options?: CommandOptions + options: ServerCommandOptions ): Promise; public async command( ns: MongoDBNamespace, cmd: Document, - options: CommandOptions, + options: ServerCommandOptions, responseType?: MongoDBResponseConstructor ): Promise { if (ns.db == null || typeof ns === 'string') { diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 4c9d71d807d..6117b5317cd 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -34,11 +34,10 @@ import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mong import { TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; -import { Timeout, TimeoutError } from '../timeout'; +import { Timeout, TimeoutContext, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, - csotMin, type EventEmitterWithState, HostAddress, List, @@ -179,8 +178,11 @@ export interface SelectServerOptions { session?: ClientSession; operationName: string; previousServer?: ServerDescription; - /** @internal*/ - timeout?: Timeout; + /** + * @internal + * TODO(NODE-5685): Make this required + * */ + timeoutContext?: TimeoutContext; } /** @public */ @@ -458,13 +460,20 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.options.timeoutMS; - const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; + const timeoutMS = this.client.s.options.timeoutMS; + const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? ReadPreference.primary; + + const timeoutContext = TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS, + waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS + }); + const selectServerOptions = { operationName: 'ping', - timeout, - ...options + ...options, + timeoutContext }; try { const server = await this.selectServer( @@ -474,7 +483,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -563,24 +572,10 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } - const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 
0; - let timeout: Timeout | null; - if (options.timeout) { - // CSOT Enabled - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - timeout = options.timeout; - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); - } - } else { - timeout = null; - } - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); + let timeout; + if (options.timeoutContext) timeout = options.timeoutContext.serverSelectionTimeout; + else { + timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); } const isSharded = this.description.type === TopologyType.Sharded; @@ -604,7 +599,7 @@ export class Topology extends TypedEventEmitter { ) ); } - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); return transaction.server; } @@ -654,7 +649,7 @@ export class Topology extends TypedEventEmitter { ); } - if (options.timeout) { + if (options.timeoutContext?.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during server selection', { cause: timeoutError }); @@ -664,7 +659,7 @@ export class Topology extends TypedEventEmitter { // Other server selection error throw error; } finally { - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); } } /** diff --git a/src/timeout.ts b/src/timeout.ts index 7af1a23f261..3d65992a02b 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,7 +1,7 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError } from './error'; -import { noop } from './utils'; +import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { @@ -107,3 +107,165 @@ export class Timeout extends Promise { ); } } + +/** @internal */ +export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; + +/** @internal */ +export type LegacyTimeoutContextOptions = { + serverSelectionTimeoutMS: number; + waitQueueTimeoutMS: number; + socketTimeoutMS?: number; +}; + +/** @internal */ +export type CSOTTimeoutContextOptions = { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; +}; + +function isLegacyTimeoutContextOptions(v: unknown): v is LegacyTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'waitQueueTimeoutMS' in v && + typeof v.waitQueueTimeoutMS === 'number' + ); +} + +function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'timeoutMS' in v && + typeof v.timeoutMS === 'number' + ); +} + +/** @internal */ +export abstract class TimeoutContext { + static create(options: TimeoutContextOptions): TimeoutContext { + if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); + else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); + else throw new MongoRuntimeError('Unrecognized options'); + } + + abstract get serverSelectionTimeout(): Timeout | null; + + abstract get connectionCheckoutTimeout(): Timeout | null; + + 
abstract get clearServerSelectionTimeout(): boolean; + + abstract get clearConnectionCheckoutTimeout(): boolean; + + abstract csotEnabled(): this is CSOTTimeoutContext; +} + +/** @internal */ +export class CSOTTimeoutContext extends TimeoutContext { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; + + clearConnectionCheckoutTimeout: boolean; + clearServerSelectionTimeout: boolean; + + private _maxTimeMS?: number; + + private _serverSelectionTimeout?: Timeout | null; + private _connectionCheckoutTimeout?: Timeout | null; + + constructor(options: CSOTTimeoutContextOptions) { + super(); + this.timeoutMS = options.timeoutMS; + + this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; + + this.socketTimeoutMS = options.socketTimeoutMS; + + this.clearServerSelectionTimeout = false; + this.clearConnectionCheckoutTimeout = true; + } + + get maxTimeMS(): number { + return this._maxTimeMS ?? -1; + } + + set maxTimeMS(v: number) { + this._maxTimeMS = v; + } + + csotEnabled(): this is CSOTTimeoutContext { + return true; + } + + get serverSelectionTimeout(): Timeout | null { + // check for undefined + if (typeof this._serverSelectionTimeout !== 'object') { + const usingServerSelectionTimeoutMS = + this.serverSelectionTimeoutMS !== 0 && + csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; + + if (usingServerSelectionTimeoutMS) { + this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + } else { + if (this.timeoutMS > 0) { + this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + } else { + this._serverSelectionTimeout = null; + } + } + } + + return this._serverSelectionTimeout; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (typeof this._connectionCheckoutTimeout !== 'object') { + if (typeof this._serverSelectionTimeout === 'object') { + // null or Timeout + this._connectionCheckoutTimeout = this._serverSelectionTimeout; + } else { + throw new MongoRuntimeError( + 'Unreachable. 
If you are seeing this error, please file a ticket on the NODE driver project on Jira' + ); + } + } + return this._connectionCheckoutTimeout; + } +} + +/** @internal */ +export class LegacyTimeoutContext extends TimeoutContext { + options: LegacyTimeoutContextOptions; + clearServerSelectionTimeout: boolean; + clearConnectionCheckoutTimeout: boolean; + + constructor(options: LegacyTimeoutContextOptions) { + super(); + this.options = options; + this.clearServerSelectionTimeout = true; + this.clearConnectionCheckoutTimeout = true; + } + + csotEnabled(): this is CSOTTimeoutContext { + return false; + } + + get serverSelectionTimeout(): Timeout | null { + if (this.options.serverSelectionTimeoutMS != null && this.options.serverSelectionTimeoutMS > 0) + return Timeout.expires(this.options.serverSelectionTimeoutMS); + return null; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (this.options.waitQueueTimeoutMS != null && this.options.waitQueueTimeoutMS > 0) + return Timeout.expires(this.options.waitQueueTimeoutMS); + return null; + } +} diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index c1426d8db1d..c4989f58d7f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -33,16 +33,20 @@ describe('CSOT spec unit tests', function () { client = this.configuration.newClient({ timeoutMS: 1000 }); // Spy on connection checkout and pull options argument const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); - const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); const expiresSpy = sinon.spy(Timeout, 'expires'); await client.db('db').collection('collection').insertOne({ x: 1 }); expect(checkoutSpy).to.have.been.calledOnce; - expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + const timeoutContext = checkoutSpy.lastCall.args[0].timeoutContext; + expect(timeoutContext).to.exist; // Check that we passed through the timeout - expect(checkoutSpy.firstCall.args[0].timeout).to.equal( - selectServerSpy.lastCall.lastArg.timeout + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.be.instanceOf(Timeout); + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.equal( + // @ts-expect-error accessing private properties + timeoutContext._connectionCheckoutTimeout ); // Check that no more Timeouts are constructed after we enter checkout diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 5636eb00db7..17d85ba5b23 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -143,7 +143,7 @@ describe('CSOT driver tests', () => { }); it('throws a MongoOperationTimeoutError', { - metadata: { requires: { mongodb: '>=4.4' } }, + metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index f6d7e68bedc..9bb2abdb87a 100644 --- a/test/tools/cmap_spec_runner.ts +++ 
b/test/tools/cmap_spec_runner.ts @@ -12,7 +12,8 @@ import { makeClientMetadata, type MongoClient, type Server, - shuffle + shuffle, + TimeoutContext } from '../mongodb'; import { isAnyRequirementSatisfied } from './unified-spec-runner/unified-utils'; import { type FailPoint, sleep } from './utils'; @@ -185,7 +186,14 @@ const compareInputToSpec = (input, expected, message) => { const getTestOpDefinitions = (threadContext: ThreadContext) => ({ checkOut: async function (op) { - const connection: Connection = await ConnectionPool.prototype.checkOut.call(threadContext.pool); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: threadContext.pool.options.waitQueueTimeoutMS + }); + const connection: Connection = await ConnectionPool.prototype.checkOut.call( + threadContext.pool, + { timeoutContext } + ); if (op.label != null) { threadContext.connections.set(op.label, connection); } else { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 18048befab4..1604cd82d86 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -10,8 +10,10 @@ const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); const { MongoClientAuthProviders } = require('../../mongodb'); +const { TimeoutContext } = require('../../mongodb'); describe('Connection Pool', function () { + let timeoutContext; let mockMongod; const stubServer = { topology: { @@ -44,6 +46,10 @@ describe('Connection Pool', function () { }) ); + beforeEach(() => { + timeoutContext = TimeoutContext.create({ waitQueueTimeoutMS: 0, serverSelectionTimeoutMS: 0 }); + }); + it('should destroy connections which have been closed', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; @@ -64,8 +70,10 @@ describe('Connection Pool', function () { const events = []; pool.on('connectionClosed', event => events.push(event)); - const conn = await pool.checkOut(); - const error = await conn.command(ns('admin.$cmd'), { ping: 1 }, {}).catch(error => error); + const conn = await pool.checkOut({ timeoutContext }); + const error = await conn + .command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }) + .catch(error => error); expect(error).to.be.instanceOf(Error); pool.checkIn(conn); @@ -93,7 +101,7 @@ describe('Connection Pool', function () { pool.ready(); - const conn = await pool.checkOut(); + const conn = await pool.checkOut({ timeoutContext }); const maybeError = await conn.command(ns('admin.$cmd'), { ping: 1 }, undefined).catch(e => e); expect(maybeError).to.be.instanceOf(MongoError); expect(maybeError).to.match(/timed out/); @@ -114,11 +122,15 @@ describe('Connection Pool', function () { waitQueueTimeoutMS: 200, hostAddress: mockMongod.hostAddress() }); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 200, + serverSelectionTimeoutMS: 0 + }); pool.ready(); - const conn = await pool.checkOut(); - const err = await pool.checkOut().catch(e => e); + const conn = await pool.checkOut({ timeoutContext }); + const err = await pool.checkOut({ timeoutContext }).catch(e => e); expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); sinon.stub(pool, 'availableConnectionCount').get(() => 0); pool.checkIn(conn); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 6bab40d0318..bdc049cbc4f 100644 --- a/test/unit/error.test.ts +++ 
b/test/unit/error.test.ts @@ -28,6 +28,7 @@ import { ns, PoolClosedError as MongoPoolClosedError, setDifference, + TimeoutContext, type TopologyDescription, type TopologyOptions, WaitQueueTimeoutError as MongoWaitQueueTimeoutError @@ -376,11 +377,17 @@ describe('MongoErrors', () => { { replicaSet: 'rs' } as TopologyOptions ); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); return replSet .connect() - .then(topology => topology.selectServer('primary', {})) + .then(topology => topology.selectServer('primary', { timeoutContext })) .then(server => - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { + timeoutContext + }) ) .then( () => expect.fail('expected command to fail'), @@ -419,10 +426,14 @@ describe('MongoErrors', () => { if (err) { return cleanup(err); } + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - topology.selectServer('primary', {}).then(server => { + topology.selectServer('primary', { timeoutContext }).then(server => { server - .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { timeoutContext }) .then(expect.fail, err => { let _err; try { diff --git a/test/unit/operations/get_more.test.ts b/test/unit/operations/get_more.test.ts index f79da44e22f..17bc20f6fa7 100644 --- a/test/unit/operations/get_more.test.ts +++ b/test/unit/operations/get_more.test.ts @@ -69,7 +69,7 @@ describe('GetMoreOperation', function () { const call = stub.getCall(0); expect(call.args[0]).to.equal(namespace); expect(call.args[1]).to.deep.equal(expectedGetMoreCommand); - expect(call.args[2]).to.deep.equal(opts); + expect(call.args[2]).to.containSubset(opts); }); }); diff --git a/test/unit/sdam/topology.test.ts b/test/unit/sdam/topology.test.ts index e4a34417d50..5264b5d9c45 100644 --- a/test/unit/sdam/topology.test.ts +++ b/test/unit/sdam/topology.test.ts @@ -17,6 +17,7 @@ import { Server, SrvPoller, SrvPollingEvent, + TimeoutContext, Topology, TopologyDescription, TopologyDescriptionChangedEvent, @@ -108,17 +109,28 @@ describe('Topology (unit)', function () { const topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); topology.connect().then(() => { - topology.selectServer('primary', {}).then(server => { - server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }).then( - () => expect.fail('expected command to fail'), - err => { - expect(err).to.exist; - expect(err).to.match(/timed out/); - topology.close(); - done(); - } - ); - }, expect.fail); + const ctx = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0, + socketTimeoutMS: 250 + }); + topology + .selectServer('primary', { + timeoutContext: ctx + }) + .then(server => { + server + .command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250, timeoutContext: ctx }) + .then( + () => expect.fail('expected command to fail'), + err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + topology.close(); + done(); + } + ); + }, expect.fail); }, expect.fail); }); }); @@ -217,10 +229,16 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const timeoutContext = 
TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.true; }); @@ -245,11 +263,17 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.false; topology.close(); @@ -269,14 +293,20 @@ describe('Topology (unit)', function () { topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); await topology.connect(); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); const server = await topology.selectServer('primary', {}); let serverDescription; server.on('descriptionReceived', sd => (serverDescription = sd)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(server.description.type).to.equal('Unknown'); }); diff --git a/test/unit/timeout.test.ts b/test/unit/timeout.test.ts index 3fafc21b35f..119d0516a9c 100644 --- a/test/unit/timeout.test.ts +++ b/test/unit/timeout.test.ts @@ -1,6 +1,14 @@ import { expect } from 'chai'; -import { MongoInvalidArgumentError, Timeout, TimeoutError } from '../mongodb'; +import { + CSOTTimeoutContext, + LegacyTimeoutContext, + MongoInvalidArgumentError, + MongoRuntimeError, + Timeout, + TimeoutContext, + TimeoutError +} from '../mongodb'; describe('Timeout', function () { let timeout: Timeout; @@ -115,3 +123,197 @@ describe('Timeout', function () { }); }); }); + +describe('TimeoutContext', function () { + describe('TimeoutContext.create', function () { + context('when timeoutMS is a number', function () { + it('returns a CSOTTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(CSOTTimeoutContext); + }); + }); + + context('when timeoutMS is undefined', function () { + it('returns a LegacyTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(LegacyTimeoutContext); + }); + }); + }); + + describe('CSOTTimeoutContext', function () { + let ctx: CSOTTimeoutContext; + + describe('get serverSelectionTimeout()', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is 0', function () { + it('returns null', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); + + expect(ctx.serverSelectionTimeout).to.be.null; + }); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is >0', function () { + it('returns a Timeout instance 
with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + }); + + context( + 'when timeoutMS is >0 serverSelectionTimeoutMS is >0 and timeoutMS > serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 15, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + } + ); + + context( + 'when timeoutMS is >0, serverSelectionTimeoutMS is >0 and timeoutMS < serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to timeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 10, + serverSelectionTimeoutMS: 15 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.timeoutMS); + }); + } + ); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when called before get serverSelectionTimeout()', function () { + it('throws a MongoRuntimeError', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 15 + }); + + expect(() => ctx.connectionCheckoutTimeout).to.throw(MongoRuntimeError); + }); + }); + + context('when called after get serverSelectionTimeout()', function () { + let serverSelectionTimeout: Timeout; + let connectionCheckoutTimeout: Timeout; + + afterEach(() => { + serverSelectionTimeout.clear(); + connectionCheckoutTimeout.clear(); + }); + + it('returns same timeout as serverSelectionTimeout', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 86 + }); + serverSelectionTimeout = ctx.serverSelectionTimeout; + connectionCheckoutTimeout = ctx.connectionCheckoutTimeout; + + expect(connectionCheckoutTimeout).to.exist; + expect(connectionCheckoutTimeout).to.equal(serverSelectionTimeout); + }); + }); + }); + }); + + describe('LegacyTimeoutContext', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + describe('get serverSelectionTimeout()', function () { + context('when serverSelectionTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 100, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.serverSelectionTimeoutMS); + }); + }); + + context('when serverSelectionTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.null; + }); + }); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when waitQueueTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to waitQueueTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + 
waitQueueTimeoutMS: 20 + }); + timeout = ctx.connectionCheckoutTimeout; + + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.waitQueueTimeoutMS); + }); + }); + + context('when waitQueueTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + waitQueueTimeoutMS: 0 + }); + + expect(ctx.connectionCheckoutTimeout).to.be.null; + }); + }); + }); + }); +}); From 7139b8f322f798f5bad32ebc67783c46ca1119b2 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 22 Jul 2024 11:17:22 -0400 Subject: [PATCH 019/136] refactor(NODE-6230): executeOperation to use iterative retry mechanism (#4157) --- src/cmap/connection_pool.ts | 6 ++++-- src/operations/execute_operation.ts | 27 ++++++++++++++++----------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5369cc155aa..2cd2bcc2c19 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -28,7 +28,7 @@ import { import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { type TimeoutContext, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -356,6 +356,7 @@ export class ConnectionPool extends TypedEventEmitter { * explicitly destroyed by the new owner. */ async checkOut(options: { timeoutContext: TimeoutContext }): Promise { + const checkoutTime = now(); this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) @@ -367,7 +368,8 @@ export class ConnectionPool extends TypedEventEmitter { const waitQueueMember: WaitQueueMember = { resolve, - reject + reject, + checkoutTime }; this[kWaitQueue].push(waitQueueMember); diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 39937c8abf4..efd92f19de3 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -25,7 +25,7 @@ import { import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; import { TimeoutContext } from '../timeout'; -import { squashError, supportsRetryableWrites } from '../utils'; +import { supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -87,12 +87,6 @@ export async function executeOperation< ); } - timeoutContext ??= TimeoutContext.create({ - serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, - waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, - timeoutMS: operation.options.timeoutMS - }); - const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -112,12 +106,18 @@ export async function executeOperation< session.unpin(); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + try { return await tryOperation(operation, { topology, + timeoutContext, session, - readPreference, - timeoutContext + readPreference }); } finally { if (session?.owner != null && session.owner === owner) { @@ -156,6 +156,7 @@ type RetryOptions = { session: ClientSession | undefined; readPreference: ReadPreference; topology: Topology; + timeoutContext: TimeoutContext; }; /** @@ -179,7 +180,10 @@ type RetryOptions = { async function tryOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(operation: T, { topology, session, readPreference }: RetryOptions): Promise { +>( + operation: T, + { topology, timeoutContext, session, readPreference }: RetryOptions +): Promise { let selector: ReadPreference | ServerSelector; if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) { @@ -197,7 +201,8 @@ async function tryOperation< let server = await topology.selectServer(selector, { session, - operationName: operation.commandName + operationName: operation.commandName, + timeoutContext }); const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION); From acfb4fc8c323b71930ad08f6b976b14d92ccb0b2 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 26 Jul 2024 09:55:20 -0400 Subject: [PATCH 020/136] feat(NODE-5682): set maxTimeMS on commands and preempt I/O (#4174) Co-authored-by: Warren James --- src/admin.ts | 5 +- src/cmap/connection.ts | 66 ++++++++++++++++--- src/cmap/wire_protocol/on_data.ts | 17 ++++- src/db.ts | 2 +- src/sdam/topology.ts | 17 +++-- src/timeout.ts | 43 ++++++++++-- ...ient_side_operations_timeout.prose.test.ts | 20 +++--- ...lient_side_operations_timeout.spec.test.ts | 33 +++++++++- .../node_csot.test.ts | 1 - test/integration/node-specific/db.test.js | 22 ++----- test/spec/{index.js => index.ts} | 19 ++---- test/tools/cmap_spec_runner.ts | 3 +- test/tools/unified-spec-runner/entities.ts | 4 +- test/tools/unified-spec-runner/match.ts | 15 ++++- test/tools/unified-spec-runner/operations.ts | 8 +-- test/unit/tools/unified_spec_runner.test.ts | 2 +- 16 files changed, 200 insertions(+), 77 deletions(-) rename test/spec/{index.js => index.ts} (67%) diff --git a/src/admin.ts b/src/admin.ts index e030384eafc..0f03023a95c 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -155,7 +155,10 @@ export class Admin { * @param options - Optional settings for the command */ async listDatabases(options?: ListDatabasesOptions): Promise { - return await executeOperation(this.s.db.client, new ListDatabasesOperation(this.s.db, options)); + return await executeOperation( + this.s.db.client, + new ListDatabasesOperation(this.s.db, { timeoutMS: this.s.db.timeoutMS, ...options }) + ); } /** diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index e88e784b457..3f391bea400 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -19,6 +19,7 @@ import { MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, MongoServerError, MongoUnexpectedServerResponseError @@ -30,7 +31,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from 
'../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type TimeoutContext } from '../timeout'; +import { type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -417,6 +418,11 @@ export class Connection extends TypedEventEmitter { ...options }; + if (options.timeoutContext?.csotEnabled()) { + const { maxTimeMS } = options.timeoutContext; + if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + } + const message = this.supportsOpMsg ? new OpMsgRequest(db, cmd, commandOptions) : new OpQueryRequest(db, cmd, commandOptions); @@ -431,7 +437,9 @@ export class Connection extends TypedEventEmitter { ): AsyncGenerator { this.throwIfAborted(); - if (typeof options.socketTimeoutMS === 'number') { + if (options.timeoutContext?.csotEnabled()) { + this.socket.setTimeout(0); + } else if (typeof options.socketTimeoutMS === 'number') { this.socket.setTimeout(options.socketTimeoutMS); } else if (this.socketTimeoutMS !== 0) { this.socket.setTimeout(this.socketTimeoutMS); @@ -440,7 +448,8 @@ export class Connection extends TypedEventEmitter { try { await this.writeCommand(message, { agreedCompressor: this.description.compressor ?? 'none', - zlibCompressionLevel: this.description.zlibCompressionLevel + zlibCompressionLevel: this.description.zlibCompressionLevel, + timeoutContext: options.timeoutContext }); if (options.noResponse || message.moreToCome) { @@ -450,7 +459,17 @@ export class Connection extends TypedEventEmitter { this.throwIfAborted(); - for await (const response of this.readMany()) { + if ( + options.timeoutContext?.csotEnabled() && + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + + for await (const response of this.readMany({ timeoutContext: options.timeoutContext })) { this.socket.setTimeout(0); const bson = response.parse(); @@ -627,7 +646,11 @@ export class Connection extends TypedEventEmitter { */ private async writeCommand( command: WriteProtocolMessageType, - options: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number } + options: { + agreedCompressor?: CompressorName; + zlibCompressionLevel?: number; + timeoutContext?: TimeoutContext; + } ): Promise { const finalCommand = options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command) @@ -639,8 +662,32 @@ export class Connection extends TypedEventEmitter { const buffer = Buffer.concat(await finalCommand.toBin()); + if (options.timeoutContext?.csotEnabled()) { + if ( + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + } + if (this.socket.write(buffer)) return; - return await once(this.socket, 'drain'); + + const drainEvent = once(this.socket, 'drain'); + const timeout = options?.timeoutContext?.timeoutForSocketWrite; + if (timeout) { + try { + return await Promise.race([drainEvent, timeout]); + } catch (error) { + if (TimeoutError.is(error)) { + throw new MongoOperationTimeoutError('Timed out at socket write'); + } + throw error; + } + } + return await drainEvent; } /** @@ -652,9 +699,12 @@ export class Connection extends 
TypedEventEmitter { * * Note that `for-await` loops call `return` automatically when the loop is exited. */ - private async *readMany(): AsyncGenerator { + private async *readMany(options: { + timeoutContext?: TimeoutContext; + }): AsyncGenerator { try { - this.dataEvents = onData(this.messageStream); + this.dataEvents = onData(this.messageStream, options); + for await (const message of this.dataEvents) { const response = await decompressResponse(message); yield response; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index b99c950d96f..a32c6b1b484 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,5 +1,7 @@ import { type EventEmitter } from 'events'; +import { MongoOperationTimeoutError } from '../../error'; +import { type TimeoutContext, TimeoutError } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -18,7 +20,10 @@ type PendingPromises = Omit< * Returns an AsyncIterator that iterates each 'data' event emitted from emitter. * It will reject upon an error event. */ -export function onData(emitter: EventEmitter) { +export function onData( + emitter: EventEmitter, + { timeoutContext }: { timeoutContext?: TimeoutContext } +) { // Setup pending events and pending promise lists /** * When the caller has not yet called .next(), we store the @@ -86,6 +91,8 @@ export function onData(emitter: EventEmitter) { // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + // eslint-disable-next-line github/no-then + timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -97,8 +104,12 @@ export function onData(emitter: EventEmitter) { function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - if (promise != null) promise.reject(err); - else error = err; + const timeoutError = TimeoutError.is(err) + ? new MongoOperationTimeoutError('Timed out during socket read') + : undefined; + + if (promise != null) promise.reject(timeoutError ?? err); + else error = timeoutError ?? err; void closeHandler(); } diff --git a/src/db.ts b/src/db.ts index 6e1aa194acf..48501bc497e 100644 --- a/src/db.ts +++ b/src/db.ts @@ -277,7 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS, + timeoutMS: options?.timeoutMS ?? this.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 6117b5317cd..479003f0e35 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -460,29 +460,28 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.s.options.timeoutMS; + // TODO(NODE-6223): auto connect cannot use timeoutMS + // const timeoutMS = this.client.s.options.timeoutMS; const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const timeoutContext = TimeoutContext.create({ - timeoutMS, + timeoutMS: undefined, serverSelectionTimeoutMS, waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS }); - const selectServerOptions = { operationName: 'ping', ...options, timeoutContext }; + try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), selectServerOptions ); - const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; - if (!skipPingOnConnect && server && this.s.credentials) { + if (!skipPingOnConnect && this.s.credentials) { await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); @@ -623,7 +622,11 @@ export class Topology extends TypedEventEmitter { try { timeout?.throwIfExpired(); - return await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + const server = await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + if (options.timeoutContext?.csotEnabled() && server.description.minRoundTripTime !== 0) { + options.timeoutContext.minRoundTripTime = server.description.minRoundTripTime; + } + return server; } catch (error) { if (TimeoutError.is(error)) { // Timeout diff --git a/src/timeout.ts b/src/timeout.ts index 3d65992a02b..cc90b8c2e72 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,6 +1,6 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; import { csotMin, noop } from './utils'; /** @internal */ @@ -51,7 +51,7 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = false) { + private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; if (duration < 0) { @@ -163,6 +163,10 @@ export abstract class TimeoutContext { abstract get clearConnectionCheckoutTimeout(): boolean; + abstract get timeoutForSocketWrite(): Timeout | null; + + abstract get timeoutForSocketRead(): Timeout | null; + abstract csotEnabled(): this is CSOTTimeoutContext; } @@ -175,13 +179,15 @@ export class CSOTTimeoutContext extends TimeoutContext { clearConnectionCheckoutTimeout: boolean; clearServerSelectionTimeout: boolean; - private _maxTimeMS?: number; - private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; + public minRoundTripTime = 0; + private start: number; constructor(options: CSOTTimeoutContextOptions) { super(); + this.start = Math.trunc(performance.now()); + this.timeoutMS = options.timeoutMS; this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; @@ -193,11 +199,12 @@ export class CSOTTimeoutContext extends TimeoutContext { } get maxTimeMS(): number { - return this._maxTimeMS ?? -1; + return this.remainingTimeMS - this.minRoundTripTime; } - set maxTimeMS(v: number) { - this._maxTimeMS = v; + get remainingTimeMS() { + const timePassed = Math.trunc(performance.now()) - this.start; + return this.timeoutMS <= 0 ? 
Infinity : this.timeoutMS - timePassed; } csotEnabled(): this is CSOTTimeoutContext { @@ -238,6 +245,20 @@ export class CSOTTimeoutContext extends TimeoutContext { } return this._connectionCheckoutTimeout; } + + get timeoutForSocketWrite(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket write'); + } + + get timeoutForSocketRead(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket read'); + } } /** @internal */ @@ -268,4 +289,12 @@ export class LegacyTimeoutContext extends TimeoutContext { return Timeout.expires(this.options.waitQueueTimeoutMS); return null; } + + get timeoutForSocketWrite(): Timeout | null { + return null; + } + + get timeoutForSocketRead(): Timeout | null { + return null; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 903ea9c3bb4..729bed42199 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -384,7 +384,7 @@ describe('CSOT spec prose tests', function () { clock.restore(); }); - it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { + it.skip('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. @@ -416,10 +416,11 @@ describe('CSOT spec prose tests', function () { await clock.tickAsync(11); expect(await maybeError).to.be.instanceof(MongoServerSelectionError); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); - it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -440,9 +441,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. 
@@ -462,9 +464,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { + it.skip('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -484,7 +487,8 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 2e2cd0fa8e5..f73f162204f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -3,7 +3,34 @@ import { join } from 'path'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -// TODO(NODE-5823): Implement unified runner operations and options support for CSOT -describe.skip('CSOT spec tests', function () { - runUnifiedSuite(loadSpecTests(join('client-side-operations-timeout'))); +const enabled = [ + 'override-collection-timeoutMS', + 'override-database-timeoutMS', + 'override-operation-timeoutMS' +]; + +const cursorOperations = [ + 'aggregate', + 'countDocuments', + 'listIndexes', + 'createChangeStream', + 'listCollections', + 'listCollectionNames' +]; + +describe('CSOT spec tests', function () { + const specs = loadSpecTests(join('client-side-operations-timeout')); + for (const spec of specs) { + for (const test of spec.tests) { + // not one of the test suites listed in kickoff + if (!enabled.includes(spec.name)) { + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + + // Cursor operation + if (test.operations.find(operation => cursorOperations.includes(operation.name))) + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + } + runUnifiedSuite(specs); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 17d85ba5b23..0c97b910836 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -48,7 +48,6 @@ describe('CSOT driver tests', () => { afterEach(async () => { await cursor?.close(); await session?.endSession(); - await session.endSession(); }); it('throws an error', async () => { diff --git a/test/integration/node-specific/db.test.js b/test/integration/node-specific/db.test.js index 338e136c12c..a092a8d888b 100644 --- a/test/integration/node-specific/db.test.js +++ b/test/integration/node-specific/db.test.js @@ -45,22 +45,12 @@ 
describe('Db', function () { }); }); - it('shouldCorrectlyHandleFailedConnection', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var configuration = this.configuration; - var fs_client = configuration.newClient('mongodb://127.0.0.1:25117/test', { - serverSelectionTimeoutMS: 10 - }); - - fs_client.connect(function (err) { - test.ok(err != null); - done(); - }); - } + it('should correctly handle failed connection', async function () { + const client = this.configuration.newClient('mongodb://iLoveJS', { + serverSelectionTimeoutMS: 10 + }); + const error = await client.connect().catch(error => error); + expect(error).to.be.instanceOf(Error); }); it('shouldCorrectlyGetErrorDroppingNonExistingDb', { diff --git a/test/spec/index.js b/test/spec/index.ts similarity index 67% rename from test/spec/index.js rename to test/spec/index.ts index f9e6dccf02f..221d6671893 100644 --- a/test/spec/index.js +++ b/test/spec/index.ts @@ -1,7 +1,7 @@ -'use strict'; -const path = require('path'); -const fs = require('fs'); -const { EJSON } = require('bson'); +import * as fs from 'fs'; +import * as path from 'path'; + +import { EJSON } from '../mongodb'; function hasDuplicates(testArray) { const testNames = testArray.map(test => test.description); @@ -12,17 +12,16 @@ function hasDuplicates(testArray) { /** * Given spec test folder names, loads the corresponding JSON * - * @param {...string} args - the spec test name to load - * @returns {any[]} + * @param args - the spec test name to load */ -function loadSpecTests(...args) { +export function loadSpecTests(...args: string[]): any[] { const specPath = path.resolve(...[__dirname].concat(args)); const suites = fs .readdirSync(specPath) .filter(x => x.includes('.json')) .map(x => ({ - ...EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), + ...EJSON.parse(fs.readFileSync(path.join(specPath, x), 'utf8'), { relaxed: true }), name: path.basename(x, '.json') })); @@ -36,7 +35,3 @@ function loadSpecTests(...args) { return suites; } - -module.exports = { - loadSpecTests -}; diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index 9bb2abdb87a..892f6311df5 100644 --- a/test/tools/cmap_spec_runner.ts +++ b/test/tools/cmap_spec_runner.ts @@ -1,6 +1,7 @@ import { expect } from 'chai'; import { EventEmitter } from 'events'; import { clearTimeout, setTimeout } from 'timers'; +import { inspect } from 'util'; import { addContainerMetadata, @@ -427,7 +428,7 @@ async function runCmapTest(test: CmapTest, threadContext: ThreadContext) { } compareInputToSpec(actualError, errorPropsToCheck, `failed while checking ${errorType}`); } else { - expect(actualError).to.not.exist; + expect(actualError, inspect(actualError)).to.not.exist; } const actualEvents = threadContext.poolEvents.filter( diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 65b5242cf06..9f4e20a828e 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -44,7 +44,7 @@ import { type TopologyOpeningEvent, WriteConcern } from '../../mongodb'; -import { ejson, getEnvironmentalOptions } from '../../tools/utils'; +import { getEnvironmentalOptions } from '../../tools/utils'; import type { TestConfiguration } from '../runner/config'; import { EntityEventRegistry } from './entity_event_registry'; import { trace } from './runner'; @@ -590,7 +590,7 @@ export class EntitiesMap extends Map { new EntityEventRegistry(client, 
entity.client, map).register(); await client.connect(); } catch (error) { - console.error(ejson`failed to connect entity ${entity}`); + console.error('failed to connect entity', entity); // In the case where multiple clients are defined in the test and any one of them failed // to connect, but others did succeed, we need to ensure all open clients are closed. const clients = map.mapOf('client'); diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 7b2668e88a0..3e3ba86d0e6 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -173,7 +173,8 @@ TYPE_MAP.set('minKey', actual => actual._bsontype === 'MinKey'); TYPE_MAP.set('maxKey', actual => actual._bsontype === 'MaxKey'); TYPE_MAP.set( 'int', - actual => (typeof actual === 'number' && Number.isInteger(actual)) || actual._bsontype === 'Int32' + actual => + (typeof actual === 'number' && Number.isInteger(actual)) || actual?._bsontype === 'Int32' ); TYPE_MAP.set( 'long', @@ -218,6 +219,10 @@ export function resultCheck( resultCheck(objFromActual, value, entities, path, checkExtraKeys); } else if (key === 'createIndexes') { for (const [i, userIndex] of actual.indexes.entries()) { + if (expected?.indexes?.[i]?.key == null) { + // The expectation does not include an assertion for the index key + continue; + } expect(expected).to.have.nested.property(`.indexes[${i}].key`).to.be.a('object'); // @ts-expect-error: Not worth narrowing to a document expect(Object.keys(expected.indexes[i].key)).to.have.lengthOf(1); @@ -371,7 +376,7 @@ export function specialCheck( for (const type of types) { ok ||= TYPE_MAP.get(type)(actual); } - expect(ok, `Expected [${actual}] to be one of [${types}]`).to.be.true; + expect(ok, `Expected ${path.join('.')} [${actual}] to be one of [${types}]`).to.be.true; } else if (isExistsOperator(expected)) { // $$exists const actualExists = actual !== undefined && actual !== null; @@ -784,6 +789,12 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 9cc67174f3c..7a98c7ac978 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -303,6 +303,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { if (!/ns not found/.test(err.message)) { throw err; } + return false; } }); @@ -313,7 +314,7 @@ operations.set('drop', async ({ entities, operation }) => { operations.set('dropIndexes', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - return collection.dropIndexes(); + return collection.dropIndexes(operation.arguments); }); operations.set('endSession', async ({ entities, operation }) => { @@ -767,11 +768,10 @@ operations.set('runCommand', async ({ entities, operation }: OperationFunctionPa throw new AssertionError('runCommand requires a command'); const { command } = operation.arguments; - if (operation.arguments.timeoutMS != null) throw new AssertionError('timeoutMS not supported'); - 
const options = { readPreference: operation.arguments.readPreference, - session: operation.arguments.session + session: operation.arguments.session, + timeoutMS: operation.arguments.timeoutMS }; return db.command(command, options); diff --git a/test/unit/tools/unified_spec_runner.test.ts b/test/unit/tools/unified_spec_runner.test.ts index a0887be9593..7ebee168590 100644 --- a/test/unit/tools/unified_spec_runner.test.ts +++ b/test/unit/tools/unified_spec_runner.test.ts @@ -100,7 +100,7 @@ describe('Unified Spec Runner', function () { expect(() => resultCheckSpy(actual, expected, entitiesMap, [])).to.throw( AssertionError, - /Expected \[string\] to be one of \[int\]/ + /\[string\] to be one of \[int\]/ ); }); }); From 4efff95018c4f0f08c3010c18975179b1889ba1e Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 1 Aug 2024 16:08:39 -0400 Subject: [PATCH 021/136] feat(NODE-6231): Add CSOT behaviour for retryable reads and writes (#4186) --- src/operations/execute_operation.ts | 9 ++++--- src/timeout.ts | 26 ++++++++++++------- ...lient_side_operations_timeout.spec.test.ts | 13 +++++++++- ...lient_side_operations_timeout.unit.test.ts | 10 +++++-- .../node_csot.test.ts | 5 ---- test/tools/unified-spec-runner/match.ts | 2 ++ 6 files changed, 44 insertions(+), 21 deletions(-) diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index efd92f19de3..c9135fa1c32 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -227,12 +227,10 @@ async function tryOperation< session.incrementTransactionNumber(); } - // TODO(NODE-6231): implement infinite retry within CSOT timeout here - const maxTries = willRetry ? 2 : 1; + const maxTries = willRetry ? (timeoutContext.csotEnabled() ? Infinity : 2) : 1; let previousOperationError: MongoError | undefined; let previousServer: ServerDescription | undefined; - // TODO(NODE-6231): implement infinite retry within CSOT timeout here for (let tries = 0; tries < maxTries; tries++) { if (previousOperationError) { if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) { @@ -276,7 +274,6 @@ async function tryOperation< return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; - if ( previousOperationError != null && operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed) @@ -285,6 +282,10 @@ async function tryOperation< } previousServer = server.description; previousOperationError = operationError; + + // Reset timeouts + timeoutContext.serverSelectionTimeout?.clear(); + timeoutContext.connectionCheckoutTimeout?.clear(); } } diff --git a/src/timeout.ts b/src/timeout.ts index cc90b8c2e72..297a484b4ec 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -39,6 +39,7 @@ export class Timeout extends Promise { public ended: number | null = null; public duration: number; public timedOut = false; + public cleared = false; get remainingTime(): number { if (this.timedOut) return 0; @@ -53,7 +54,6 @@ export class Timeout extends Promise { /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; - if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } @@ -86,6 +86,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.cleared = true; } throwIfExpired(): void { @@ 
-213,16 +214,20 @@ export class CSOTTimeoutContext extends TimeoutContext { get serverSelectionTimeout(): Timeout | null { // check for undefined - if (typeof this._serverSelectionTimeout !== 'object') { + if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { + const { remainingTimeMS, serverSelectionTimeoutMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError( + `Timed out in server selection after ${this.timeoutMS}ms` + ); const usingServerSelectionTimeoutMS = - this.serverSelectionTimeoutMS !== 0 && - csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; - + serverSelectionTimeoutMS !== 0 && + csotMin(remainingTimeMS, serverSelectionTimeoutMS) === serverSelectionTimeoutMS; if (usingServerSelectionTimeoutMS) { - this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + this._serverSelectionTimeout = Timeout.expires(serverSelectionTimeoutMS); } else { - if (this.timeoutMS > 0) { - this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + if (remainingTimeMS > 0 && Number.isFinite(remainingTimeMS)) { + this._serverSelectionTimeout = Timeout.expires(remainingTimeMS); } else { this._serverSelectionTimeout = null; } @@ -233,7 +238,10 @@ export class CSOTTimeoutContext extends TimeoutContext { } get connectionCheckoutTimeout(): Timeout | null { - if (typeof this._connectionCheckoutTimeout !== 'object') { + if ( + typeof this._connectionCheckoutTimeout !== 'object' || + this._connectionCheckoutTimeout?.cleared + ) { if (typeof this._serverSelectionTimeout === 'object') { // null or Timeout this._connectionCheckoutTimeout = this._serverSelectionTimeout; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index f73f162204f..e4c9eb3027c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -6,7 +6,9 @@ import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const enabled = [ 'override-collection-timeoutMS', 'override-database-timeoutMS', - 'override-operation-timeoutMS' + 'override-operation-timeoutMS', + 'retryability-legacy-timeouts', + 'retryability-timeoutMS' ]; const cursorOperations = [ @@ -18,6 +20,11 @@ const cursorOperations = [ 'listCollectionNames' ]; +const bulkWriteOperations = [ + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' +]; + describe('CSOT spec tests', function () { const specs = loadSpecTests(join('client-side-operations-timeout')); for (const spec of specs) { @@ -30,6 +37,10 @@ describe('CSOT spec tests', function () { // Cursor operation if (test.operations.find(operation => cursorOperations.includes(operation.name))) test.skipReason = 'TODO(NODE-5684): Not working yet'; + + if (bulkWriteOperations.includes(test.description)) + test.skipReason = + 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } runUnifiedSuite(specs); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 
c4989f58d7f..944d9b96048 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -7,7 +7,7 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; +import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -22,10 +22,16 @@ describe('CSOT spec unit tests', function () { it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); sinon.spy(Timeout, 'expires'); + const timeoutContextSpy = sinon.spy(TimeoutContext, 'create'); await client.db('db').collection('collection').insertOne({ x: 1 }); - expect(Timeout.expires).to.have.been.calledWith(10000); + const createCalls = timeoutContextSpy.getCalls().filter( + // @ts-expect-error accessing concrete field + call => call.args[0].timeoutMS === 10000 + ); + + expect(createCalls).to.have.length.greaterThanOrEqual(1); expect(Timeout.expires).to.not.have.been.calledWith(999999); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 0c97b910836..63e2d97dd90 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,6 +1,5 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; -import * as sinon from 'sinon'; import { type ClientSession, @@ -13,10 +12,6 @@ import { } from '../../mongodb'; describe('CSOT driver tests', () => { - afterEach(() => { - sinon.restore(); - }); - describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 3e3ba86d0e6..90996b9640e 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -789,6 +789,8 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their + // errorResponse field if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { From 1997f8179f0e1136091e473d414882c15002b524 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 12 Aug 2024 16:46:06 -0400 Subject: [PATCH 022/136] feat(NODE-6312): add error transformation for server timeouts (#4192) --- src/cmap/connection.ts | 29 ++++ src/cmap/wire_protocol/responses.ts | 36 +++- .../node_csot.test.ts | 163 +++++++++++++++++- 3 files changed, 225 insertions(+), 3 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 3f391bea400..96b8bb26ebd 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -16,6 +16,7 @@ import { } from '../constants'; import { MongoCompatibilityError, + MONGODB_ERROR_CODES, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, @@ -538,6 +539,11 @@ export class Connection extends TypedEventEmitter { } if (document.ok === 0) { + if 
(options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError((object ??= document.toObject(bsonOptions))) + }); + } throw new MongoServerError((object ??= document.toObject(bsonOptions))); } @@ -611,6 +617,29 @@ export class Connection extends TypedEventEmitter { ): Promise { this.throwIfAborted(); for await (const document of this.sendCommand(ns, command, options, responseType)) { + if (options.timeoutContext?.csotEnabled()) { + if (MongoDBResponse.is(document)) { + // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT + if (document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document.toObject()) + }); + } + } else { + if ( + (Array.isArray(document?.writeErrors) && + document.writeErrors.some( + error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + )) || + document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + ) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document) + }); + } + } + } + return document; } throw new MongoUnexpectedServerResponseError('Unable to get response from server'); diff --git a/src/cmap/wire_protocol/responses.ts b/src/cmap/wire_protocol/responses.ts index 6c166afd61e..12b68784272 100644 --- a/src/cmap/wire_protocol/responses.ts +++ b/src/cmap/wire_protocol/responses.ts @@ -11,7 +11,7 @@ import { pluckBSONSerializeOptions, type Timestamp } from '../../bson'; -import { MongoUnexpectedServerResponseError } from '../../error'; +import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error'; import { type ClusterTime } from '../../sdam/common'; import { decorateDecryptionResult, ns } from '../../utils'; import { @@ -111,6 +111,40 @@ export class MongoDBResponse extends OnDemandDocument { // {ok:1} static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0])); + /** + * Returns true iff: + * - ok is 0 and the top-level code === 50 + * - ok is 1 and the writeErrors array contains a code === 50 + * - ok is 1 and the writeConcern object contains a code === 50 + */ + get isMaxTimeExpiredError() { + // {ok: 0, code: 50 ... } + const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isTopLevel) return true; + + if (this.ok === 0) return false; + + // {ok: 1, writeConcernError: {code: 50 ... }} + const isWriteConcern = + this.get('writeConcernError', BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isWriteConcern) return true; + + const writeErrors = this.get('writeErrors', BSONType.array); + if (writeErrors?.size()) { + for (let i = 0; i < writeErrors.size(); i++) { + const isWriteError = + writeErrors.get(i, BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + + // {ok: 1, writeErrors: [{code: 50 ... }]} + if (isWriteError) return true; + } + } + + return false; + } + /** * Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the * contents of the document. 
diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 63e2d97dd90..d7d4a4ede5a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,17 +1,23 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; +import * as semver from 'semver'; +import * as sinon from 'sinon'; import { + BSON, type ClientSession, type Collection, + Connection, type Db, type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, - MongoOperationTimeoutError + MongoOperationTimeoutError, + MongoServerError } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', () => { +describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -161,4 +167,157 @@ describe('CSOT driver tests', () => { }); }); }); + + describe('server-side maxTimeMS errors are transformed', () => { + let client: MongoClient; + let commandsSucceeded; + let commandsFailed; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); + commandsSucceeded = []; + commandsFailed = []; + client.on('commandSucceeded', event => { + if (event.commandName === 'configureFailPoint') return; + commandsSucceeded.push(event); + }); + client.on('commandFailed', event => commandsFailed.push(event)); + }); + + afterEach(async function () { + await client + .db() + .collection('a') + .drop() + .catch(() => null); + await client.close(); + commandsSucceeded = undefined; + commandsFailed = undefined; + }); + + describe('when a maxTimeExpired error is returned at the top-level', () => { + // {ok: 0, code: 50, codeName: "MaxTimeMSExpired", errmsg: "operation time limit exceeded"} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['ping'], + errorCode: 50 + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command failed', async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { + // The server should always return one maxTimeExpiredError at the front of the writeErrors array + // But for the sake of defensive programming we will find any maxTime error in the array. 
+ + beforeEach(async () => { + const writeErrorsReply = BSON.serialize({ + ok: 1, + writeErrors: [ + { code: 2, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 3, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 4, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 50, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' } + ] + }); + const commandSpy = sinon.spy(Connection.prototype, 'command'); + const readManyStub = sinon + // @ts-expect-error: readMany is private + .stub(Connection.prototype, 'readMany') + .callsFake(async function* (...args) { + const realIterator = readManyStub.wrappedMethod.call(this, ...args); + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + }); + }); + + afterEach(() => sinon.restore()); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { + // {ok: 1, writeConcernError: {code: 50, codeName: "MaxTimeMSExpired"}} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + writeConcernError: { code: 50, errmsg: 'times up buster', errorLabels: [] } + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + }); + }); + }); }); From cc3ef8f02fc5596fa26193ae7980143d9539ecae Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 9 Sep 2024 11:11:11 -0400 Subject: [PATCH 023/136] feat(NODE-6313): add CSOT support to sessions and transactions (#4199) --- package-lock.json | 82 +++--- package.json | 2 +- src/cmap/connection.ts | 7 + src/cmap/wire_protocol/on_data.ts | 15 +- src/collection.ts | 12 +- src/db.ts | 22 +- src/error.ts | 3 + src/operations/execute_operation.ts | 8 +- src/sessions.ts | 255 ++++++++++++------ src/timeout.ts | 49 +++- src/transactions.ts | 7 +- src/utils.ts | 13 +- 
...ient_side_operations_timeout.prose.test.ts | 167 +++++++++++- ...lient_side_operations_timeout.spec.test.ts | 18 +- .../node_csot.test.ts | 150 +++++++++++ .../sessions-inherit-timeoutMS.json | 28 +- .../sessions-inherit-timeoutMS.yml | 19 +- ...sessions-override-operation-timeoutMS.json | 32 ++- .../sessions-override-operation-timeoutMS.yml | 23 +- .../sessions-override-timeoutMS.json | 28 +- .../sessions-override-timeoutMS.yml | 19 +- test/tools/unified-spec-runner/entities.ts | 4 + test/tools/unified-spec-runner/match.ts | 19 +- test/tools/unified-spec-runner/operations.ts | 27 +- 24 files changed, 776 insertions(+), 233 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2b07cd361d5..1d9cebf509b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6415,10 +6415,46 @@ "node": ">=10" } }, - "node_modules/mongodb": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.8.0.tgz", - "integrity": "sha512-HGQ9NWDle5WvwMnrvUxsFYPd3JEbqD3RgABHBQRuoCEND0qzhsd0iH5ypHsf1eJ+sXmvmyKpP+FLOKY8Il7jMw==", + "node_modules/mongodb-client-encryption": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", + "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "bindings": "^1.5.0", + "node-addon-api": "^4.3.0", + "prebuild-install": "^7.1.2" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-connection-string-url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", + "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", + "dependencies": { + "@types/whatwg-url": "^11.0.2", + "whatwg-url": "^13.0.0" + } + }, + "node_modules/mongodb-legacy": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", + "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "dev": true, + "dependencies": { + "mongodb": "^6.0.0" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-legacy/node_modules/mongodb": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.7.0.tgz", + "integrity": "sha512-TMKyHdtMcO0fYBNORiYdmM25ijsHs+Njs963r4Tro4OQZzqYigAzYQouwWRg4OIaiLRUEGUh/1UAcH5lxdSLIA==", "dev": true, "dependencies": { "@mongodb-js/saslprep": "^1.1.5", @@ -6461,42 +6497,6 @@ } } }, - "node_modules/mongodb-client-encryption": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", - "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "node-addon-api": "^4.3.0", - "prebuild-install": "^7.1.2" - }, - "engines": { - "node": ">=16.20.1" - } - }, - "node_modules/mongodb-connection-string-url": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", - "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", - "dependencies": { - "@types/whatwg-url": "^11.0.2", - "whatwg-url": "^13.0.0" - } - }, - "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", - "dev": true, - "dependencies": { - "mongodb": "^6.0.0" - }, - "engines": { - "node": ">=16.20.1" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", diff --git a/package.json b/package.json index 479356905dc..2de0e1811f0 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 96b8bb26ebd..eddd2a9223c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -742,6 +742,13 @@ export class Connection extends TypedEventEmitter { return; } } + } catch (readError) { + if (TimeoutError.is(readError)) { + throw new MongoOperationTimeoutError( + `Timed out during socket read (${readError.duration}ms)` + ); + } + throw readError; } finally { this.dataEvents = null; this.throwIfAborted(); diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index a32c6b1b484..23fd88e2828 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,7 +1,6 @@ import { type EventEmitter } from 'events'; -import { MongoOperationTimeoutError } from '../../error'; -import { type TimeoutContext, TimeoutError } from '../../timeout'; +import { type TimeoutContext } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -91,8 +90,11 @@ export function onData( // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + + const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; + timeoutForSocketRead?.throwIfExpired(); // eslint-disable-next-line github/no-then - timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); + timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -104,12 +106,9 @@ export function onData( function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - const timeoutError = TimeoutError.is(err) - ? new MongoOperationTimeoutError('Timed out during socket read') - : undefined; - if (promise != null) promise.reject(timeoutError ?? err); - else error = timeoutError ?? err; + if (promise != null) promise.reject(err); + else error = err; void closeHandler(); } diff --git a/src/collection.ts b/src/collection.ts index dbd91371cce..f3a206b0c7b 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -470,10 +470,14 @@ export class Collection { // Intentionally, we do not inherit options from parent for this operation. 
return await executeOperation( this.client, - new RenameOperation(this as TODO_NODE_3286, newName, { - ...options, - readPreference: ReadPreference.PRIMARY - }) as TODO_NODE_3286 + new RenameOperation( + this as TODO_NODE_3286, + newName, + resolveOptions(undefined, { + ...options, + readPreference: ReadPreference.PRIMARY + }) + ) as TODO_NODE_3286 ); } diff --git a/src/db.ts b/src/db.ts index 48501bc497e..bd0b5450b8c 100644 --- a/src/db.ts +++ b/src/db.ts @@ -275,12 +275,16 @@ export class Db { // Intentionally, we do not inherit options from parent for this operation. return await executeOperation( this.client, - new RunCommandOperation(this, command, { - ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS ?? this.timeoutMS, - session: options?.session, - readPreference: options?.readPreference - }) + new RunCommandOperation( + this, + command, + resolveOptions(undefined, { + ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS ?? this.timeoutMS, + session: options?.session, + readPreference: options?.readPreference + }) + ) ); } @@ -385,7 +389,11 @@ export class Db { new RenameOperation( this.collection(fromCollection) as TODO_NODE_3286, toCollection, - { ...options, new_collection: true, readPreference: ReadPreference.primary } + resolveOptions(undefined, { + ...options, + new_collection: true, + readPreference: ReadPreference.primary + }) ) as TODO_NODE_3286 ); } diff --git a/src/error.ts b/src/error.ts index 3f47e07d662..3f803a8c4a7 100644 --- a/src/error.ts +++ b/src/error.ts @@ -124,6 +124,9 @@ function isAggregateError(e: unknown): e is Error & { errors: Error[] } { * mongodb-client-encryption has a dependency on this error, it uses the constructor with a string argument */ export class MongoError extends Error { + get [Symbol.toStringTag]() { + return this.name; + } /** @internal */ [kErrorLabels]: Set; /** diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index c9135fa1c32..f9d9f9b63b4 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -58,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -81,11 +81,6 @@ export async function executeOperation< } else if (session.client !== client) { throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient'); } - if (session.explicit && session?.timeoutMS != null && operation.options.timeoutMS != null) { - throw new MongoInvalidArgumentError( - 'Do not specify timeoutMS on operation if already specified on an explicit session' - ); - } const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -107,6 +102,7 @@ export async function executeOperation< } timeoutContext ??= TimeoutContext.create({ + session, serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, timeoutMS: operation.options.timeoutMS diff --git a/src/sessions.ts b/src/sessions.ts index bad966ed71c..bbd1785275f 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -29,6 +29,7 @@ import { ReadConcernLevel } from './read_concern'; import { ReadPreference } from './read_preference'; import { type AsyncDisposable, configureResourceManagement } from './resource_management'; import { _advanceClusterTime, type ClusterTime, TopologyType } from './sdam/common'; +import { TimeoutContext } from './timeout'; import { isTransactionCommand, Transaction, @@ -58,8 +59,11 @@ export interface ClientSessionOptions { snapshot?: boolean; /** The default TransactionOptions to use for transactions started on this session. */ defaultTransactionOptions?: TransactionOptions; - /** @internal - * The value of timeoutMS used for CSOT. Used to override client timeoutMS */ + /** + * @public + * An overriding timeoutMS value to use for a client-side timeout. + * If not provided the session uses the timeoutMS specified on the MongoClient. + */ defaultTimeoutMS?: number; /** @internal */ @@ -98,6 +102,9 @@ export interface EndSessionOptions { error?: AnyError; force?: boolean; forceClear?: boolean; + + /** @internal */ + timeoutMS?: number; } /** @@ -115,7 +122,7 @@ export class ClientSession /** @internal */ sessionPool: ServerSessionPool; hasEnded: boolean; - clientOptions?: MongoOptions; + clientOptions: MongoOptions; supports: { causalConsistency: boolean }; clusterTime?: ClusterTime; operationTime?: Timestamp; @@ -137,6 +144,9 @@ export class ClientSession /** @internal */ timeoutMS?: number; + /** @internal */ + public timeoutContext: TimeoutContext | null = null; + /** * Create a client session. * @internal @@ -149,7 +159,7 @@ export class ClientSession client: MongoClient, sessionPool: ServerSessionPool, options: ClientSessionOptions, - clientOptions?: MongoOptions + clientOptions: MongoOptions ) { super(); @@ -269,8 +279,13 @@ export class ClientSession async endSession(options?: EndSessionOptions): Promise { try { if (this.inTransaction()) { - await this.abortTransaction(); + await this.abortTransaction({ ...options, throwTimeout: true }); } + } catch (error) { + // spec indicates that we should ignore all errors for `endSessions` + if (error.name === 'MongoOperationTimeoutError') throw error; + squashError(error); + } finally { if (!this.hasEnded) { const serverSession = this[kServerSession]; if (serverSession != null) { @@ -286,10 +301,6 @@ export class ClientSession this.hasEnded = true; this.emit('ended', this); } - } catch (error) { - // spec indicates that we should ignore all errors for `endSessions` - squashError(error); - } finally { maybeClearPinnedConnection(this, { force: true, ...options }); } } @@ -441,8 +452,10 @@ export class ClientSession /** * Commits the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. 
*/ - async commitTransaction(): Promise { + async commitTransaction(options?: { timeoutMS?: number }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -492,8 +505,25 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + this.timeoutContext ?? + (typeof timeoutMS === 'number' + ? TimeoutContext.create({ + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS, + timeoutMS + }) + : null); + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (firstCommitError) { if (firstCommitError instanceof MongoError && isRetryableWriteError(firstCommitError)) { @@ -503,7 +533,7 @@ export class ClientSession this.unpin({ force: true }); try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (retryCommitError) { // If the retry failed, we process that error instead of the original @@ -535,8 +565,13 @@ export class ClientSession /** * Aborts the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. */ - async abortTransaction(): Promise { + async abortTransaction(options?: { timeoutMS?: number }): Promise; + /** @internal */ + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise; + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -581,18 +616,45 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : this.timeoutContext?.csotEnabled() + ? this.timeoutContext.timeoutMS // refresh timeoutMS for abort operation + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + timeoutMS != null + ? 
TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); this.unpin(); return; } catch (firstAbortError) { this.unpin(); + if (firstAbortError.name === 'MongoRuntimeError') throw firstAbortError; + if (options?.throwTimeout && firstAbortError.name === 'MongoOperationTimeoutError') { + throw firstAbortError; + } + if (firstAbortError instanceof MongoError && isRetryableWriteError(firstAbortError)) { try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; - } catch { + } catch (secondAbortError) { + if (secondAbortError.name === 'MongoRuntimeError') throw secondAbortError; + if (options?.throwTimeout && secondAbortError.name === 'MongoOperationTimeoutError') { + throw secondAbortError; + } // we do not retry the retry } } @@ -647,96 +709,119 @@ export class ClientSession */ async withTransaction( fn: WithTransactionCallback, - options?: TransactionOptions + options?: TransactionOptions & { + /** + * Configures a timeoutMS expiry for the entire withTransactionCallback. + * + * @remarks + * - The remaining timeout will not be applied to callback operations that do not use the ClientSession. + * - Overriding timeoutMS for operations executed using the explicit session inside the provided callback will result in a client-side error. + */ + timeoutMS?: number; + } ): Promise { const MAX_TIMEOUT = 120000; - const startTime = now(); - - let committed = false; - let result: any; - while (!committed) { - this.startTransaction(options); // may throw on error + const timeoutMS = options?.timeoutMS ?? this.timeoutMS ?? null; + this.timeoutContext = + timeoutMS != null + ? TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; - try { - const promise = fn(this); - if (!isPromiseLike(promise)) { - throw new MongoInvalidArgumentError( - 'Function provided to `withTransaction` must return a Promise' - ); - } + const startTime = this.timeoutContext?.csotEnabled() ? 
this.timeoutContext.start : now(); - result = await promise; + let committed = false; + let result: any; - if ( - this.transaction.state === TxnState.NO_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_COMMITTED || - this.transaction.state === TxnState.TRANSACTION_ABORTED - ) { - // Assume callback intentionally ended the transaction - return result; - } - } catch (fnError) { - if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { - await this.abortTransaction(); - throw fnError; - } + try { + while (!committed) { + this.startTransaction(options); // may throw on error - if ( - this.transaction.state === TxnState.STARTING_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS - ) { - await this.abortTransaction(); - } + try { + const promise = fn(this); + if (!isPromiseLike(promise)) { + throw new MongoInvalidArgumentError( + 'Function provided to `withTransaction` must return a Promise' + ); + } - if ( - fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT - ) { - continue; - } + result = await promise; - throw fnError; - } + if ( + this.transaction.state === TxnState.NO_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_COMMITTED || + this.transaction.state === TxnState.TRANSACTION_ABORTED + ) { + // Assume callback intentionally ended the transaction + return result; + } + } catch (fnError) { + if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { + await this.abortTransaction(); + throw fnError; + } - while (!committed) { - try { - /* - * We will rely on ClientSession.commitTransaction() to - * apply a majority write concern if commitTransaction is - * being retried (see: DRIVERS-601) - */ - await this.commitTransaction(); - committed = true; - } catch (commitError) { - /* - * Note: a maxTimeMS error will have the MaxTimeMSExpired - * code (50) and can be reported as a top-level error or - * inside writeConcernError, ex. - * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } - * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } - */ if ( - !isMaxTimeMSExpiredError(commitError) && - commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && - now() - startTime < MAX_TIMEOUT + this.transaction.state === TxnState.STARTING_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS ) { - continue; + await this.abortTransaction(); } if ( - commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT + fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) ) { - break; + continue; } - throw commitError; + throw fnError; + } + + while (!committed) { + try { + /* + * We will rely on ClientSession.commitTransaction() to + * apply a majority write concern if commitTransaction is + * being retried (see: DRIVERS-601) + */ + await this.commitTransaction(); + committed = true; + } catch (commitError) { + /* + * Note: a maxTimeMS error will have the MaxTimeMSExpired + * code (50) and can be reported as a top-level error or + * inside writeConcernError, ex. 
+ * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } + * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } + */ + if ( + !isMaxTimeMSExpiredError(commitError) && + commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + continue; + } + + if ( + commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + break; + } + + throw commitError; + } } } + return result; + } finally { + this.timeoutContext = null; } - - return result; } } diff --git a/src/timeout.ts b/src/timeout.ts index 297a484b4ec..f057bdb90b4 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,16 +1,19 @@ import { clearTimeout, setTimeout } from 'timers'; import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; +import { type ClientSession } from './sessions'; import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { + duration: number; override get name(): 'TimeoutError' { return 'TimeoutError'; } - constructor(message: string, options?: { cause?: Error }) { + constructor(message: string, options: { cause?: Error; duration: number }) { super(message, options); + this.duration = options.duration; } static is(error: unknown): error is TimeoutError { @@ -52,12 +55,19 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = true) { - let reject!: Reject; + private constructor( + executor: Executor = () => null, + options?: { duration: number; unref?: true; rejection?: Error } + ) { + const duration = options?.duration ?? 
0; + const unref = !!options?.unref; + const rejection = options?.rejection; + if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } + let reject!: Reject; super((_, promiseReject) => { reject = promiseReject; @@ -67,16 +77,20 @@ export class Timeout extends Promise { this.duration = duration; this.start = Math.trunc(performance.now()); - if (this.duration > 0) { + if (rejection == null && this.duration > 0) { this.id = setTimeout(() => { this.ended = Math.trunc(performance.now()); this.timedOut = true; - reject(new TimeoutError(`Expired after ${duration}ms`)); + reject(new TimeoutError(`Expired after ${duration}ms`, { duration })); }, this.duration); if (typeof this.id.unref === 'function' && unref) { // Ensure we do not keep the Node.js event loop running this.id.unref(); } + } else if (rejection != null) { + this.ended = Math.trunc(performance.now()); + this.timedOut = true; + reject(rejection); } } @@ -90,11 +104,11 @@ export class Timeout extends Promise { } throwIfExpired(): void { - if (this.timedOut) throw new TimeoutError('Timed out'); + if (this.timedOut) throw new TimeoutError('Timed out', { duration: this.duration }); } - public static expires(durationMS: number, unref?: boolean): Timeout { - return new Timeout(undefined, durationMS, unref); + public static expires(duration: number, unref?: true): Timeout { + return new Timeout(undefined, { duration, unref }); } static is(timeout: unknown): timeout is Timeout { @@ -107,10 +121,16 @@ export class Timeout extends Promise { typeof timeout.then === 'function' ); } + + static override reject(rejection?: Error): Timeout { + return new Timeout(undefined, { duration: 0, unref: true, rejection }); + } } /** @internal */ -export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; +export type TimeoutContextOptions = (LegacyTimeoutContextOptions | CSOTTimeoutContextOptions) & { + session?: ClientSession; +}; /** @internal */ export type LegacyTimeoutContextOptions = { @@ -151,6 +171,7 @@ function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions /** @internal */ export abstract class TimeoutContext { static create(options: TimeoutContextOptions): TimeoutContext { + if (options.session?.timeoutContext != null) return options.session?.timeoutContext; if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); else throw new MongoRuntimeError('Unrecognized options'); @@ -183,7 +204,7 @@ export class CSOTTimeoutContext extends TimeoutContext { private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; public minRoundTripTime = 0; - private start: number; + public start: number; constructor(options: CSOTTimeoutContextOptions) { super(); @@ -217,8 +238,8 @@ export class CSOTTimeoutContext extends TimeoutContext { if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { const { remainingTimeMS, serverSelectionTimeoutMS } = this; if (remainingTimeMS <= 0) - throw new MongoOperationTimeoutError( - `Timed out in server selection after ${this.timeoutMS}ms` + return Timeout.reject( + new MongoOperationTimeoutError(`Timed out in server selection after ${this.timeoutMS}ms`) ); const usingServerSelectionTimeoutMS = serverSelectionTimeoutMS !== 0 && @@ -258,14 +279,14 @@ export class CSOTTimeoutContext extends TimeoutContext { const { remainingTimeMS 
} = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket write'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket write')); } get timeoutForSocketRead(): Timeout | null { const { remainingTimeMS } = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket read'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } } diff --git a/src/transactions.ts b/src/transactions.ts index 53dcb842084..db251c82c16 100644 --- a/src/transactions.ts +++ b/src/transactions.ts @@ -60,7 +60,7 @@ const COMMITTED_STATES: Set = new Set([ * Configuration options for a transaction. * @public */ -export interface TransactionOptions extends CommandOperationOptions { +export interface TransactionOptions extends Omit { // TODO(NODE-3344): These options use the proper class forms of these settings, it should accept the basic enum values too /** A default read concern for commands in this transaction */ readConcern?: ReadConcernLike; @@ -68,7 +68,10 @@ export interface TransactionOptions extends CommandOperationOptions { writeConcern?: WriteConcern; /** A default read preference for commands in this transaction */ readPreference?: ReadPreferenceLike; - /** Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds */ + /** + * Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds + * @deprecated This option is deprecated in favor of `timeoutMS` or `defaultTimeoutMS`. + */ maxCommitTimeMS?: number; } diff --git a/src/utils.ts b/src/utils.ts index ebc0784cb1f..04174813c9c 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -517,6 +517,10 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. + * + * @param parent - An optional owning class of the operation being run. ex. Db/Collection/MongoClient. + * @param options - The options passed to the operation method. + * * @internal */ export function resolveOptions( @@ -544,9 +548,14 @@ export function resolveOptions( result.readPreference = readPreference; } - const timeoutMS = options?.timeoutMS; + const isConvenientTransaction = session?.explicit && session?.timeoutContext != null; + if (isConvenientTransaction && options?.timeoutMS != null) { + throw new MongoInvalidArgumentError( + 'An operation cannot be given a timeoutMS setting when inside a withTransaction call that has a timeoutMS setting' + ); + } - result.timeoutMS = timeoutMS ?? parent?.timeoutMS; + result.timeoutMS = options?.timeoutMS ?? 
parent?.timeoutMS; return result; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 729bed42199..406aa53ed6a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,6 +1,7 @@ /* Specification prose tests */ import { expect } from 'chai'; +import * as semver from 'semver'; import * as sinon from 'sinon'; import { @@ -9,6 +10,7 @@ import { MongoServerSelectionError, now } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -595,7 +597,10 @@ describe('CSOT spec prose tests', function () { 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context.skip('9. endSession', () => { + describe('9. endSession', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4', topology: ['replicaset', 'sharded'] } + }; /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -625,12 +630,92 @@ describe('CSOT spec prose tests', function () { * 1. Using `session`, execute `session.end_session` * - Expect this to fail with a timeout error after no more than 15ms. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + const internalClient = this.configuration.newClient(); + // End in-progress transactions otherwise "drop" will hang + await internalClient.db('admin').command({ killAllSessions: [] }); + await internalClient + .db('endSession_db') + .collection('endSession_coll') + .drop() + .catch(() => null); + await internalClient.db('endSession_db').createCollection('endSession_coll'); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + const internalClient = this.configuration.newClient(); + await internalClient.db('admin').command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client?.close(); + }); + + describe('when timeoutMS is provided to the client', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient({ timeoutMS: 150, monitorCommands: true }); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when defaultTimeoutMS is provided to startSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = 
client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession({ defaultTimeoutMS: 150 }); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when timeoutMS is provided to endSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession({ timeoutMS: 150 }).catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); }); - context.skip('10. Convenient Transactions', () => { + describe('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=4.4' } + }; - context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { + describe('when an operation fails inside withTransaction callback', () => { /** * 1. Using `internalClient`, drop the `db.coll` collection. * 1. Using `internalClient`, set the following fail point: @@ -641,7 +726,7 @@ describe('CSOT spec prose tests', function () { * data: { * failCommands: ["insert", "abortTransaction"], * blockConnection: true, - * blockTimeMS: 15 + * blockTimeMS: 200 * } * } * ``` @@ -658,6 +743,80 @@ describe('CSOT spec prose tests', function () { * 1. `command_started` and `command_failed` events for an `insert` command. * 1. `command_started` and `command_failed` events for an `abortTransaction` command. 
*/ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it('timeoutMS is refreshed for abortTransaction', metadata, async function () { + if ( + this.configuration.topologyType === 'ReplicaSetWithPrimary' && + semver.satisfies(this.configuration.version, '<=4.4') + ) { + this.skipReason = '4.4 replicaset fail point does not blockConnection for requested time'; + this.skip(); + } + + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 150, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + const withTransactionError = await session + .withTransaction(async session => { + await coll.insertOne({ x: 1 }, { session }); + }) + .catch(error => error); + + try { + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal(['insert', 'abortTransaction']); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + }); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index e4c9eb3027c..a178cecc5d2 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -1,4 +1,5 @@ import { join } from 'path'; +import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; @@ -8,7 +9,10 @@ const enabled = [ 'override-database-timeoutMS', 'override-operation-timeoutMS', 'retryability-legacy-timeouts', - 'retryability-timeoutMS' + 'retryability-timeoutMS', + 'sessions-override-operation-timeoutMS', + 'sessions-override-timeoutMS', + 'sessions-inherit-timeoutMS' ]; const cursorOperations = [ @@ -43,5 +47,15 @@ describe('CSOT spec tests', function () { 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } - runUnifiedSuite(specs); + runUnifiedSuite(specs, (test, configuration) => { + const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + sessionCSOTTests.includes(test.description) && + configuration.topologyType === 
'ReplicaSetWithPrimary' && + semver.satisfies(configuration.version, '<=4.4') + ) { + return '4.4 replicaset fail point does not blockConnection for requested time'; + } + return false; + }); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index d7d4a4ede5a..cc767c1d80a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -12,6 +12,7 @@ import { type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, + MongoInvalidArgumentError, MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; @@ -320,4 +321,153 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { }); }); }); + + describe('when using an explicit session', () => { + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset'], mongodb: '>=4.4' } + }; + + describe('created for a withTransaction callback', () => { + describe('passing a timeoutMS and a session with a timeoutContext', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('throws a validation error from the operation', metadata, async () => { + // Drivers MUST raise a validation error if an explicit session with a timeout is used and + // the timeoutMS option is set at the operation level for operations executed as part of a withTransaction callback. + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll + .insertOne({ x: 1 }, { session, timeoutMS: 1234 }) + .catch(error => error); + throw insertError; + }) + .catch(error => error); + + expect(insertError).to.be.instanceOf(MongoInvalidArgumentError); + expect(withTransactionError).to.be.instanceOf(MongoInvalidArgumentError); + }); + }); + }); + + describe('created manually', () => { + describe('passing a timeoutMS and a session with an inherited timeoutMS', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('does not throw a validation error', metadata, async () => { + const coll = client.db('db').collection('coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session, timeoutMS: 1234 }); + await session.abortTransaction(); // this uses the inherited timeoutMS, not the insert + }); + }); + }); + }); + + describe('Convenient Transactions', () => { + /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. 
*/ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=5.0' } + }; + + describe('when an operation fails inside withTransaction callback', () => { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 600 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it( + 'timeoutMS is refreshed for abortTransaction and the timeout error is thrown from the operation', + metadata, + async function () { + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 500, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll.insertOne({ x: 1 }, { session }).catch(error => error); + throw insertError; + }) + .catch(error => error); + + try { + expect(insertError).to.be.instanceOf(MongoOperationTimeoutError); + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal([ + 'insert', + 'abortTransaction' + ]); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + } + ); + }); + }); }); diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json index abbc3217327..13ea91c7948 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } 
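The spec change above (sessions-inherit-timeoutMS.json) and its YAML counterpart below raise the fail point delays and client timeouts tenfold (50ms/60ms to 500ms/600ms) and additionally expect an abortTransaction command, carrying maxTimeMS, after the failed insert inside withTransaction. At the driver API level, the behaviour under test looks roughly like the following minimal sketch; `uri` and the db/collection names are placeholders, and it assumes MongoOperationTimeoutError is exported from the package entry point, as the index.ts change in this series indicates:

import { MongoClient, MongoOperationTimeoutError } from 'mongodb';

async function run(uri: string): Promise<void> {
  // timeoutMS set on the client is inherited by the session and by withTransaction
  const client = new MongoClient(uri, { timeoutMS: 500 });
  const session = client.startSession();
  const coll = client.db('db').collection('coll');
  try {
    await session.withTransaction(async session => {
      // If this insert is blocked on the server past the remaining timeoutMS,
      // the callback fails; the driver still sends abortTransaction before
      // surfacing the timeout to the caller.
      await coll.insertOne({ x: 1 }, { session });
    });
  } catch (error) {
    // A timeout is the expected outcome when the server-side delay exceeds timeoutMS
    if (!(error instanceof MongoOperationTimeoutError)) throw error;
  } finally {
    await session.endSession();
    await client.close();
  }
}

The expected-event changes in both files mirror that flow: a started/failed pair for the insert followed by a started/failed pair for abortTransaction, rather than the previously documented behaviour of sending no further commands once the timeout had expired.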
diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml index 184ef7eb9e7..c79384e5f0b 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -52,7 +52,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json index 0254b184a14..441c698328c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml index 8a80a65720a..bee91dc4cb8 100644 --- 
a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml @@ -50,7 +50,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -61,7 +61,7 @@ tests: - name: commitTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectError: isTimeoutError: true expectEvents: @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -106,7 +106,7 @@ tests: - name: abortTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectEvents: - client: *client events: @@ -138,11 +138,11 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 callback: - name: insertOne object: *collection @@ -156,9 +156,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -169,3 +166,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json index c46ae4dd506..d90152e909c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml index 61aaab4d97e..73aaf9ff2a7 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml @@ -29,7 +29,7 @@ createEntities: id: &session session client: *client sessionOptions: - defaultTimeoutMS: 50 + defaultTimeoutMS: 500 initialData: - collectionName: *collectionName @@ -52,7 +52,7 @@ 
tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 9f4e20a828e..7f90e275dc8 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -619,6 +619,10 @@ export class EntitiesMap extends Map { const options = Object.create(null); + if (entity.session.sessionOptions?.defaultTimeoutMS != null) { + options.defaultTimeoutMS = entity.session.sessionOptions?.defaultTimeoutMS; + } + if (entity.session.sessionOptions?.causalConsistency) { options.causalConsistency = entity.session.sessionOptions?.causalConsistency; } diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 90996b9640e..35c274dfbe0 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -512,6 +512,13 @@ function compareCommandFailedEvents( } } +function expectInstanceOf any>( + instance: any, + ctor: T +): asserts instance is InstanceType { + expect(instance).to.be.instanceOf(ctor); +} + function compareEvents( actual: CommandEvent[] | CmapEvent[] | SdamEvent[], expected: (ExpectedCommandEvent & ExpectedCmapEvent & ExpectedSdamEvent)[], @@ -526,9 +533,7 @@ function compareEvents( if (expectedEvent.commandStartedEvent) { const path = `${rootPrefix}.commandStartedEvent`; - if (!(actualEvent instanceof CommandStartedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandStartedEvent`); - } + expectInstanceOf(actualEvent, CommandStartedEvent); compareCommandStartedEvents(actualEvent, expectedEvent.commandStartedEvent, entities, path); if (expectedEvent.commandStartedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); @@ -537,9 +542,7 @@ function compareEvents( } } else if (expectedEvent.commandSucceededEvent) { const path = `${rootPrefix}.commandSucceededEvent`; - if (!(actualEvent instanceof CommandSucceededEvent)) { - expect.fail(`expected ${path} to be instanceof CommandSucceededEvent`); - } + expectInstanceOf(actualEvent, CommandSucceededEvent); compareCommandSucceededEvents( actualEvent, expectedEvent.commandSucceededEvent, @@ -553,9 +556,7 @@ function compareEvents( } } else if (expectedEvent.commandFailedEvent) { const path = `${rootPrefix}.commandFailedEvent`; - if 
(!(actualEvent instanceof CommandFailedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandFailedEvent`); - } + expectInstanceOf(actualEvent, CommandFailedEvent); compareCommandFailedEvents(actualEvent, expectedEvent.commandFailedEvent, entities, path); if (expectedEvent.commandFailedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 7a98c7ac978..5b5b7040698 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -19,6 +19,7 @@ import { ServerType, type TopologyDescription, type TopologyType, + type TransactionOptions, WriteConcern } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -49,11 +50,6 @@ operations.set('createEntities', async ({ entities, operation, testConfig }) => await EntitiesMap.createEntities(testConfig, null, operation.arguments.entities!, entities); }); -operations.set('abortTransaction', async ({ entities, operation }) => { - const session = entities.getEntity('session', operation.object); - return session.abortTransaction(); -}); - operations.set('aggregate', async ({ entities, operation }) => { const dbOrCollection = entities.get(operation.object) as Db | Collection; if (!(dbOrCollection instanceof Db || dbOrCollection instanceof Collection)) { @@ -241,7 +237,12 @@ operations.set('close', async ({ entities, operation }) => { operations.set('commitTransaction', async ({ entities, operation }) => { const session = entities.getEntity('session', operation.object); - return session.commitTransaction(); + return await session.commitTransaction({ timeoutMS: operation.arguments?.timeoutMS }); +}); + +operations.set('abortTransaction', async ({ entities, operation }) => { + const session = entities.getEntity('session', operation.object); + return await session.abortTransaction({ timeoutMS: operation.arguments?.timeoutMS }); }); operations.set('createChangeStream', async ({ entities, operation }) => { @@ -371,7 +372,7 @@ operations.set('insertOne', async ({ entities, operation }) => { // Looping exposes the fact that we can generate _ids for inserted // documents and we don't want the original operation to get modified // and use the same _id for each insert. 
- return collection.insertOne({ ...document }, opts); + return await collection.insertOne({ ...document }, opts); }); operations.set('insertMany', async ({ entities, operation }) => { @@ -718,13 +719,17 @@ operations.set('waitForThread', async ({ entities, operation }) => { operations.set('withTransaction', async ({ entities, operation, client, testConfig }) => { const session = entities.getEntity('session', operation.object); - const options = { + const options: TransactionOptions = { readConcern: ReadConcern.fromOptions(operation.arguments), writeConcern: WriteConcern.fromOptions(operation.arguments), readPreference: ReadPreference.fromOptions(operation.arguments), - maxCommitTimeMS: operation.arguments!.maxCommitTimeMS + maxCommitTimeMS: operation.arguments?.maxCommitTimeMS }; + if (typeof operation.arguments?.timeoutMS === 'number') { + options.timeoutMS = operation.arguments.timeoutMS; + } + await session.withTransaction(async () => { for (const callbackOperation of operation.arguments!.callback) { await executeOperationAndCheck(callbackOperation, entities, client, testConfig, true); @@ -945,7 +950,7 @@ export async function executeOperationAndCheck( rethrow = false ): Promise { const opFunc = operations.get(operation.name); - expect(opFunc, `Unknown operation: ${operation.name}`).to.exist; + if (opFunc == null) expect.fail(`Unknown operation: ${operation.name}`); if (operation.arguments && operation.arguments.session) { // The session could need to be either pulled from the entity map or in the case where @@ -959,7 +964,7 @@ export async function executeOperationAndCheck( let result; try { - result = await opFunc!({ entities, operation, client, testConfig }); + result = await opFunc({ entities, operation, client, testConfig }); } catch (error) { if (operation.expectError) { expectErrorCheck(error, operation.expectError, entities); From 38affaea63581f00c0fa16c3ae1e8fb9bea28af2 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 11:35:27 -0400 Subject: [PATCH 024/136] feat(NODE-6304): add CSOT support for non-tailable cursors (#4195) --- src/cmap/connection.ts | 4 +- src/cmap/wire_protocol/on_data.ts | 1 - src/collection.ts | 6 +- src/cursor/abstract_cursor.ts | 146 +++- src/cursor/aggregation_cursor.ts | 20 +- src/cursor/change_stream_cursor.ts | 6 +- src/cursor/find_cursor.ts | 2 +- src/cursor/list_collections_cursor.ts | 2 +- src/cursor/list_indexes_cursor.ts | 2 +- src/cursor/run_command_cursor.ts | 14 +- src/index.ts | 2 +- src/operations/aggregate.ts | 4 + src/operations/execute_operation.ts | 3 +- src/operations/find.ts | 4 + src/operations/indexes.ts | 9 +- src/operations/list_collections.ts | 3 + src/operations/operation.ts | 3 + src/operations/run_command.ts | 2 + src/sessions.ts | 12 +- src/timeout.ts | 27 +- ...ient_side_operations_timeout.prose.test.ts | 84 ++- ...lient_side_operations_timeout.spec.test.ts | 83 ++- .../node_csot.test.ts | 335 ++++++++- .../command-execution.json | 153 ++++ .../client-side-operations-timeout/README.md | 661 ++++++++++++++++++ .../change-streams.json | 20 +- .../change-streams.yml | 30 +- .../close-cursors.json | 12 +- .../close-cursors.yml | 12 +- .../command-execution.json | 2 +- .../command-execution.yml | 5 +- .../convenient-transactions.json | 22 +- .../convenient-transactions.yml | 15 +- .../deprecated-options.json | 2 +- .../deprecated-options.yml | 2 +- .../gridfs-advanced.yml | 2 +- .../non-tailable-cursors.json | 20 +- .../non-tailable-cursors.yml | 32 +- .../retryability-timeoutMS.json | 250 +++++++ 
.../retryability-timeoutMS.yml | 100 +++ .../tailable-awaitData.json | 14 +- .../tailable-awaitData.yml | 18 +- .../tailable-non-awaitData.json | 10 +- .../tailable-non-awaitData.yml | 12 +- test/tools/unified-spec-runner/operations.ts | 7 +- test/unit/cursor/aggregation_cursor.test.ts | 67 +- 46 files changed, 2008 insertions(+), 234 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json create mode 100644 test/spec/client-side-operations-timeout/README.md diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index eddd2a9223c..d0b00b40e1a 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -86,6 +86,7 @@ export interface CommandOptions extends BSONSerializeOptions { documentsReturnedIn?: string; noResponse?: boolean; omitReadPreference?: boolean; + omitMaxTimeMS?: boolean; // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint // from executeOperation that the txnNum should be applied to this command. @@ -419,7 +420,7 @@ export class Connection extends TypedEventEmitter { ...options }; - if (options.timeoutContext?.csotEnabled()) { + if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { const { maxTimeMS } = options.timeoutContext; if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } @@ -619,7 +620,6 @@ export class Connection extends TypedEventEmitter { for await (const document of this.sendCommand(ns, command, options, responseType)) { if (options.timeoutContext?.csotEnabled()) { if (MongoDBResponse.is(document)) { - // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT if (document.isMaxTimeExpiredError) { throw new MongoOperationTimeoutError('Server reported a timeout error', { cause: new MongoServerError(document.toObject()) diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 23fd88e2828..64c636f41f1 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -93,7 +93,6 @@ export function onData( const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; timeoutForSocketRead?.throwIfExpired(); - // eslint-disable-next-line github/no-then timeoutForSocketRead?.then(undefined, errorHandler); return iterator; diff --git a/src/collection.ts b/src/collection.ts index f3a206b0c7b..a73a5276f5f 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -11,7 +11,7 @@ import { type ListSearchIndexesOptions } from './cursor/list_search_indexes_cursor'; import type { Db } from './db'; -import { MongoInvalidArgumentError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError } from './error'; import type { MongoClient, PkFactory } from './mongo_client'; import type { Filter, @@ -678,7 +678,9 @@ export class Collection { new DropIndexOperation(this as TODO_NODE_3286, '*', resolveOptions(this, options)) ); return true; - } catch { + } catch (error) { + if (error instanceof MongoOperationTimeoutError) throw error; // TODO: Check the spec for index management behaviour/file a drivers ticket for this + // Seems like we should throw all errors return false; } } diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 51206b51a27..d0f386923ad 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,6 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from 
'../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; +import { TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -60,6 +61,17 @@ export interface CursorStreamOptions { /** @public */ export type CursorFlag = (typeof CURSOR_FLAGS)[number]; +/** @public*/ +export const CursorTimeoutMode = Object.freeze({ + ITERATION: 'iteration', + LIFETIME: 'cursorLifetime' +} as const); + +/** @public + * TODO(NODE-5688): Document and release + * */ +export type CursorTimeoutMode = (typeof CursorTimeoutMode)[keyof typeof CursorTimeoutMode]; + /** @public */ export interface AbstractCursorOptions extends BSONSerializeOptions { session?: ClientSession; @@ -105,6 +117,8 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { noCursorTimeout?: boolean; /** @internal TODO(NODE-5688): make this public */ timeoutMS?: number; + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ @@ -117,6 +131,8 @@ export type InternalAbstractCursorOptions = Omit { - await this.cleanup(); + async close(options?: { timeoutMS?: number }): Promise { + await this.cleanup(options?.timeoutMS); } /** @@ -658,6 +727,8 @@ export abstract class AbstractCursor< this.cursorId = null; this.documents?.clear(); + this.timeoutContext?.clear(); + this.timeoutContext = undefined; this.isClosed = false; this.isKilled = false; this.initialized = false; @@ -707,7 +778,7 @@ export abstract class AbstractCursor< } ); - return await executeOperation(this.cursorClient, getMoreOperation); + return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); } /** @@ -718,6 +789,12 @@ export abstract class AbstractCursor< * a significant refactor. 
*/ private async cursorInit(): Promise { + if (this.cursorOptions.timeoutMS != null) { + this.timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }); + } try { const state = await this._initialize(this.cursorSession); const response = state.response; @@ -729,7 +806,7 @@ export abstract class AbstractCursor< } catch (error) { // the cursor is now initialized, even if an error occurred this.initialized = true; - await this.cleanup(error); + await this.cleanup(undefined, error); throw error; } @@ -763,6 +840,7 @@ export abstract class AbstractCursor< // otherwise need to call getMore const batchSize = this.cursorOptions.batchSize || 1000; + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; try { const response = await this.getMore(batchSize); @@ -770,7 +848,7 @@ export abstract class AbstractCursor< this.documents = response; } catch (error) { try { - await this.cleanup(error); + await this.cleanup(undefined, error); } catch (error) { // `cleanupCursor` should never throw, squash and throw the original error squashError(error); @@ -791,7 +869,7 @@ export abstract class AbstractCursor< } /** @internal */ - private async cleanup(error?: Error) { + private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; try { @@ -806,11 +884,23 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; + let timeoutContext: TimeoutContext | undefined; + if (timeoutMS != null) { + this.timeoutContext?.clear(); + timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }); + } else { + this.timeoutContext?.refresh(); + timeoutContext = this.timeoutContext; + } await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session - }) + }), + timeoutContext ); } } catch (error) { diff --git a/src/cursor/aggregation_cursor.ts b/src/cursor/aggregation_cursor.ts index 9762c8a03bf..056f28454ce 100644 --- a/src/cursor/aggregation_cursor.ts +++ b/src/cursor/aggregation_cursor.ts @@ -1,4 +1,5 @@ import type { Document } from '../bson'; +import { MongoAPIError } from '../error'; import type { ExplainCommandOptions, ExplainVerbosityLike } from '../explain'; import type { MongoClient } from '../mongo_client'; import { AggregateOperation, type AggregateOptions } from '../operations/aggregate'; @@ -9,6 +10,7 @@ import { mergeOptions, type MongoDBNamespace } from '../utils'; import { AbstractCursor, type AbstractCursorOptions, + CursorTimeoutMode, type InitialCursorResponse } from './abstract_cursor'; @@ -38,6 +40,15 @@ export class AggregationCursor extends AbstractCursor { this.pipeline = pipeline; this.aggregateOptions = options; + + const lastStage: Document | undefined = this.pipeline[this.pipeline.length - 1]; + + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (lastStage?.$merge != null || lastStage?.$out != null) + ) + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); } clone(): AggregationCursor { @@ -60,7 +71,7 @@ export class AggregationCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, aggregateOperation); + const response = await executeOperation(this.client, aggregateOperation, 
this.timeoutContext); return { server: aggregateOperation.server, session, response }; } @@ -95,6 +106,13 @@ export class AggregationCursor extends AbstractCursor { addStage(stage: Document): AggregationCursor; addStage(stage: Document): AggregationCursor { this.throwIfInitialized(); + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (stage.$out != null || stage.$merge != null) + ) { + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); + } this.pipeline.push(stage); return this as unknown as AggregationCursor; } diff --git a/src/cursor/change_stream_cursor.ts b/src/cursor/change_stream_cursor.ts index b42ce3e1302..13f58675552 100644 --- a/src/cursor/change_stream_cursor.ts +++ b/src/cursor/change_stream_cursor.ts @@ -133,7 +133,11 @@ export class ChangeStreamCursor< session }); - const response = await executeOperation(session.client, aggregateOperation); + const response = await executeOperation( + session.client, + aggregateOperation, + this.timeoutContext + ); const server = aggregateOperation.server; this.maxWireVersion = maxWireVersion(server); diff --git a/src/cursor/find_cursor.ts b/src/cursor/find_cursor.ts index 83a12818bd0..96b764dc7ff 100644 --- a/src/cursor/find_cursor.ts +++ b/src/cursor/find_cursor.ts @@ -69,7 +69,7 @@ export class FindCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, findOperation); + const response = await executeOperation(this.client, findOperation, this.timeoutContext); // the response is not a cursor when `explain` is enabled this.numReturned = response.batchSize; diff --git a/src/cursor/list_collections_cursor.ts b/src/cursor/list_collections_cursor.ts index a529709556d..9b69de1b935 100644 --- a/src/cursor/list_collections_cursor.ts +++ b/src/cursor/list_collections_cursor.ts @@ -41,7 +41,7 @@ export class ListCollectionsCursor< session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/list_indexes_cursor.ts b/src/cursor/list_indexes_cursor.ts index 799ddf5bdb5..0f768f3b699 100644 --- a/src/cursor/list_indexes_cursor.ts +++ b/src/cursor/list_indexes_cursor.ts @@ -30,7 +30,7 @@ export class ListIndexesCursor extends AbstractCursor { session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 78b9826b9b1..6b31ce2263a 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -9,12 +9,20 @@ import type { ReadConcernLike } from '../read_concern'; import type { ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; import { ns } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { + AbstractCursor, + type CursorTimeoutMode, + type InitialCursorResponse +} from './abstract_cursor'; /** @public */ export type RunCursorCommandOptions = { readPreference?: ReadPreferenceLike; session?: ClientSession; + /** @internal */ + timeoutMS?: number; + /** @internal */ + timeoutMode?: CursorTimeoutMode; } & BSONSerializeOptions; /** 
@public */ @@ -105,7 +113,7 @@ export class RunCommandCursor extends AbstractCursor { responseType: CursorResponse }); - const response = await executeOperation(this.client, operation); + const response = await executeOperation(this.client, operation, this.timeoutContext); return { server: operation.server, @@ -123,6 +131,6 @@ export class RunCommandCursor extends AbstractCursor { ...this.getMoreOptions }); - return await executeOperation(this.client, getMoreOperation); + return await executeOperation(this.client, getMoreOperation, this.timeoutContext); } } diff --git a/src/index.ts b/src/index.ts index 6ddcc887bc3..822db1ada8b 100644 --- a/src/index.ts +++ b/src/index.ts @@ -107,7 +107,7 @@ export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypt export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi'; export { AuthMechanism } from './cmap/auth/providers'; export { Compressor } from './cmap/wire_protocol/compression'; -export { CURSOR_FLAGS } from './cursor/abstract_cursor'; +export { CURSOR_FLAGS, type CursorTimeoutMode } from './cursor/abstract_cursor'; export { MongoErrorLabel } from './error'; export { ExplainVerbosity } from './explain'; export { ServerApiVersion } from './mongo_client'; diff --git a/src/operations/aggregate.ts b/src/operations/aggregate.ts index 50494cbba73..096fe372715 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -36,6 +37,9 @@ export interface AggregateOptions extends CommandOperationOptions { let?: Document; out?: string; + + /** @internal */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index f9d9f9b63b4..dd9ba06c514 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -280,8 +280,7 @@ async function tryOperation< previousOperationError = operationError; // Reset timeouts - timeoutContext.serverSelectionTimeout?.clear(); - timeoutContext.connectionCheckoutTimeout?.clear(); + timeoutContext.clear(); } } diff --git a/src/operations/find.ts b/src/operations/find.ts index 5f359324d56..c39695cc0bc 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; @@ -64,6 +65,9 @@ export interface FindOptions * @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored. 
*/ oplogReplay?: boolean; + + /** @internal*/ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index c96a5d73453..220d438d834 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -1,7 +1,7 @@ import type { Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Collection } from '../collection'; -import { type AbstractCursorOptions } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; @@ -360,7 +360,12 @@ export class DropIndexOperation extends CommandOperation { } /** @public */ -export type ListIndexesOptions = AbstractCursorOptions; +export type ListIndexesOptions = AbstractCursorOptions & { + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; + /** @internal */ + omitMaxTimeMS?: boolean; +}; /** @internal */ export class ListIndexesOperation extends CommandOperation { diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index 702db0fe3f2..50df243a3ff 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -1,5 +1,6 @@ import type { Binary, Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -16,6 +17,8 @@ export interface ListCollectionsOptions extends Omit { public readonly start: number; public ended: number | null = null; public duration: number; - public timedOut = false; + private timedOut = false; public cleared = false; get remainingTime(): number { @@ -100,6 +100,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.timedOut = false; this.cleared = true; } @@ -190,6 +191,10 @@ export abstract class TimeoutContext { abstract get timeoutForSocketRead(): Timeout | null; abstract csotEnabled(): this is CSOTTimeoutContext; + + abstract refresh(): void; + + abstract clear(): void; } /** @internal */ @@ -288,6 +293,18 @@ export class CSOTTimeoutContext extends TimeoutContext { if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } + + refresh(): void { + this.start = Math.trunc(performance.now()); + this.minRoundTripTime = 0; + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } + + clear(): void { + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } } /** @internal */ @@ -326,4 +343,12 @@ export class LegacyTimeoutContext extends TimeoutContext { get timeoutForSocketRead(): Timeout | null { return null; } + + refresh(): void { + return; + } + + clear(): void { + return; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 406aa53ed6a..0d36998fd96 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ 
b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -4,7 +4,9 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { type CommandStartedEvent } from '../../../mongodb'; import { + type CommandSucceededEvent, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -216,12 +218,52 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('5. Blocking Iteration Methods', () => { + context('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an * error occurs. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 20 + } + }; + let internalClient: MongoClient; + let client: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient.db('db').dropCollection('coll'); + // Creating capped collection to be able to create tailable find cursor + const coll = await internalClient + .db('db') + .createCollection('coll', { capped: true, size: 1_000_000 }); + await coll.insertOne({ x: 1 }); + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); context('Tailable cursors', () => { /** @@ -248,6 +290,29 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ + + it.skip('send correct number of finds and getMores', async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { tailable: true, awaitData: true }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + // Check that there are no getMores sent + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(0); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + // Expect 1 find + expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); + // Expect 2 getMore + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6305)'; }); context('Change Streams', () => { @@ -272,6 +337,23 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ + it.skip('sends correct number of aggregate and getMores', async function () { + const changeStream = client.db('db').collection('coll').watch(); + const maybeError = await changeStream.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + const aggregates = commandStarted + .filter(e => e.command.aggregate != null) + .map(e => e.command); + const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); + // Expect 1 aggregate + expect(aggregates).to.have.lengthOf(1); + // Expect 1 getMore + expect(getMores).to.have.lengthOf(1); + }).skipReason = 'TODO(NODE-6305)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index a178cecc5d2..99914fa08e7 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -4,49 +4,55 @@ import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -const enabled = [ - 'override-collection-timeoutMS', - 'override-database-timeoutMS', - 'override-operation-timeoutMS', - 'retryability-legacy-timeouts', - 'retryability-timeoutMS', - 'sessions-override-operation-timeoutMS', - 'sessions-override-timeoutMS', - 'sessions-inherit-timeoutMS' -]; +const skippedSpecs = { + bulkWrite: 'TODO(NODE-6274)', + 'change-streams': 'TODO(NODE-6035)', + 'convenient-transactions': 'TODO(NODE-5687)', + 'deprecated-options': 'TODO(NODE-5689)', + 'gridfs-advanced': 'TODO(NODE-6275)', + 'gridfs-delete': 'TODO(NODE-6275)', + 'gridfs-download': 'TODO(NODE-6275)', + 'gridfs-find': 'TODO(NODE-6275)', + 'gridfs-upload': 'TODO(NODE-6275)', + 'tailable-awaitData': 'TODO(NODE-6035)', + 'tailable-non-awaitData': 'TODO(NODE-6035)' +}; -const cursorOperations = [ - 'aggregate', - 'countDocuments', - 'listIndexes', - 'createChangeStream', - 'listCollections', - 'listCollectionNames' -]; - -const bulkWriteOperations = [ - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' -]; +const skippedTests = { + 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': + 'TODO(NODE-6305)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': + 'TODO(NODE-6274)', + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': + 'TODO(NODE-6274)', + 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', + 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': + 'TODO(DRIVERS-2965)', + 'Non-tailable cursor lifetime remaining timeoutMS applied to 
getMore if timeoutMode is unset': + 'TODO(DRIVERS-2965)', + 'maxTimeMS value in the command is less than timeoutMS': + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' +}; describe('CSOT spec tests', function () { - const specs = loadSpecTests(join('client-side-operations-timeout')); + const specs = loadSpecTests('client-side-operations-timeout'); for (const spec of specs) { for (const test of spec.tests) { - // not one of the test suites listed in kickoff - if (!enabled.includes(spec.name)) { - test.skipReason = 'TODO(NODE-5684): Not working yet'; + if (skippedSpecs[spec.name] != null) { + test.skipReason = skippedSpecs[spec.name]; + } + if (skippedTests[test.description] != null) { + test.skipReason = skippedTests[test.description]; } - - // Cursor operation - if (test.operations.find(operation => cursorOperations.includes(operation.name))) - test.skipReason = 'TODO(NODE-5684): Not working yet'; - - if (bulkWriteOperations.includes(test.description)) - test.skipReason = - 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } + runUnifiedSuite(specs, (test, configuration) => { const sessionCSOTTests = ['timeoutMS applied to withTransaction']; if ( @@ -59,3 +65,10 @@ describe('CSOT spec tests', function () { return false; }); }); + +describe('CSOT modified spec tests', function () { + const specs = loadSpecTests( + join('..', 'integration', 'client-side-operations-timeout', 'unified-csot-node-specs') + ); + runUnifiedSuite(specs); +}); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index cc767c1d80a..f5ada7eef9f 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,6 @@ /* Anything javascript specific relating to timeouts */ +import { setTimeout } from 'node:timers/promises'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -7,6 +9,9 @@ import { BSON, type ClientSession, type Collection, + type CommandFailedEvent, + type CommandStartedEvent, + type CommandSucceededEvent, Connection, type Db, type FindCursor, @@ -18,7 +23,9 @@ import { } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { +const metadata = { requires: { mongodb: '>=4.4' } }; + +describe('CSOT driver tests', metadata, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -171,8 +178,8 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('server-side maxTimeMS errors are transformed', () => { let client: MongoClient; - let commandsSucceeded; - let commandsFailed; + let commandsSucceeded: CommandSucceededEvent[]; + let commandsFailed: CommandFailedEvent[]; beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); @@ -221,18 +228,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command failed', async () => { - const error = await client - .db() - .command({ ping: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); 
- expect(error.cause).to.have.property('code', 50); - - expect(commandsFailed).to.have.lengthOf(1); - expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command failed', + metadata, + async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { @@ -267,18 +278,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { afterEach(() => sinon.restore()); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client - .db('admin') - .command({ giveMeWriteErrors: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { @@ -306,22 +321,266 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + } + ); + }); + }); + + describe('Non-Tailable cursors', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await 
internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('ITERATION mode', () => { + context('when executing an operation', () => { + it( + 'must apply the configured timeoutMS to the initial operation execution', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 3, timeoutMode: 'iteration', timeoutMS: 10 }) + .limit(3); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + + it('refreshes the timeout for any getMores', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + + // Iterating over 3 documents in the collection, each artificially taking ~50 ms due to failpoint. If timeoutMS is not refreshed, then we'd expect to error + for await (const doc of cursor) { + expect(doc).to.deep.equal({ x: 1 }); + } + + const finds = commandSucceeded.filter(ev => ev.commandName === 'find'); + const getMores = commandSucceeded.filter(ev => ev.commandName === 'getMore'); + + expect(finds).to.have.length(1); // Expecting 1 find + expect(getMores).to.have.length(3); // Expecting 3 getMores (including final empty getMore) + }); + + it( + 'does not append a maxTimeMS to the original command or getMores', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + await cursor.toArray(); + + expect(commandStarted).to.have.length.gte(3); // Find and 2 getMores + expect( + commandStarted.filter(ev => { + return ( + ev.command.find != null && + ev.command.getMore != null && + ev.command.maxTimeMS != null + ); + }) + ).to.have.lengthOf(0); + } + ); + }); + }); + + context('LIFETIME mode', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient .db() - 
.collection('a') - .insertOne({}) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeConcernError.code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + context('when executing a next call', () => { + context( + 'when there are documents available from previously retrieved batch and timeout has expired', + () => { + it('returns documents without error', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.be.gt(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.not.be.instanceOf(MongoOperationTimeoutError); + expect(docOrErr).to.be.deep.equal({ x: 1 }); + }); + } + ); + context('when a getMore is required and the timeout has expired', () => { + it('throws a MongoOperationTimeoutError', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + + .project({ _id: 0 }); + + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.equal(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + it('does not apply maxTimeMS to a getMore', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 1000 }) + .project({ _id: 0 }); + + for await (const _doc of cursor) { + // Ignore _doc + } + + const getMores = commandStarted + .filter(ev => ev.command.getMore != null) + .map(ev => ev.command); + expect(getMores.length).to.be.gt(0); + + for (const getMore of getMores) { + expect(getMore.maxTimeMS).to.not.exist; + } + }); }); }); }); + describe.skip('Tailable non-awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe.skip('Tailable awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json new file mode 100644 index 00000000000..dd6fcb2cf84 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json @@ -0,0 +1,153 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + 
"initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1500 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 500 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/client-side-operations-timeout/README.md b/test/spec/client-side-operations-timeout/README.md new file mode 100644 index 00000000000..a960c2de219 --- /dev/null +++ b/test/spec/client-side-operations-timeout/README.md @@ -0,0 +1,661 @@ +# Client Side Operations Timeouts Tests + +______________________________________________________________________ + +## Introduction + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +## Spec Tests + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of +them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute these +tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another +with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and +`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters. + +## Prose Tests + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). 
Any fail points set during a +test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests +MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to +listen for `command_started` events. + +### 1. Multi-batch inserts + +This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an +exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow +for differing bulk encoding performance. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call. + + - Expect this to fail with a timeout error. + +5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. + +### 2. maxTimeMS is not set for commands sent to mongocryptd + +This test MUST only be run against enterprise server versions 4.2 and higher. + +1. Launch a mongocryptd process on 23000. +2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. +3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. +4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. + +### 3. ClientEncryption + +Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY` +refers to the following base64: + +```javascript +Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk +``` + +For each test, perform the following setup: + +1. Using `internalClient`, drop and create the `keyvault.datakeys` collection. + +2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`. + +3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). Configure this + object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map: + + ```javascript + { + "local": { "key": } + } + ``` + +#### createDataKey + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to fail with a timeout error. + +3. Verify that an `insert` command was executed against to `keyvault.datakeys` as part of the `createDataKey` call. + +#### encrypt + +1. Call `client_encryption.createDataKey()` with the `local` KMS provider. + + - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. 
Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`. + + - Expect this to fail with a timeout error. + +4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. + +#### decrypt + +1. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`. + +2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`. + + - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`. + +3. Close and re-create the `keyVaultClient` and `clientEncryption` objects. + +4. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +5. Call `clientEncryption.decrypt()` with the value `encrypted`. + + - Expect this to fail with a timeout error. + +6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. + +### 4. Background Connection Pooling + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields +(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for +some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are +not published within that time. + +#### timeoutMS used for handshake commands + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 10 + - `appName` of `timeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published. + +#### timeoutMS is refreshed for each handshake command + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 20 + - `appName` of `refreshTimeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published. + +### 5. Blocking Iteration Methods + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an +error occurs. + +#### Tailable cursors + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`. + +3. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`. + + - Expect this to succeed and return a cursor with a non-zero ID. + +6. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command. + +7. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the + test. + +#### Change Streams + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +4. Using `client`, use the `watch` helper to create a change stream against `db.coll`. + + - Expect this to succeed and return a change stream with a non-zero ID. + +5. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during + the test. + +### 6. GridFS - Upload + +Tests in this section MUST only be run against server versions 4.4 and higher. + +#### uploads via openUploadStream can be timed out + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload a single `0x12` byte. + +7. Call `uploadStream.close()` to flush the stream and insert chunks. + + - Expect this to fail with a timeout error. + +#### Aborting an upload stream can be timed out + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with + `chunkSizeBytes=2`. + +5. 
Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`. + +7. Call `uploadStream.abort()`. + + - Expect this to fail with a timeout error. + +### 7. GridFS - Download + +This test MUST only be run against server versions 4.4 and higher. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, insert the following document into the `db.fs.files` collection: + + ```javascript + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream + (referred to as `downloadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +7. Read from the `downloadStream`. + + - Expect this to fail with a timeout error. + +8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against + `db.fs.chunks`. + +### 8. Server Selection + +#### serverSelectionTimeoutMS honored if timeoutMS is not set + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. +2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0 + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. 
a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +### 9. endSession + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run +three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via +the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the +`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient + (referred to as `session`). + +4. Execute the following code: + + ```typescript + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + ``` + +5. Using `session`, execute `session.end_session` + + - Expect this to fail with a timeout error after no more than 15ms. + +### 10. Convenient Transactions + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +#### timeoutMS is refreshed for abortTransaction if the callback fails + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession + derived from that MongoClient (referred to as `session`). + +4. Using `session`, execute a `withTransaction` operation with the following callback: + + ```typescript + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + ``` + +5. Expect the previous `withTransaction` call to fail with a timeout error. + +6. 
Verify that the following events were published during the `withTransaction` call: + + 1. `command_started` and `command_failed` events for an `insert` command. + 2. `command_started` and `command_failed` events for an `abortTransaction` command. + +### 11. Multi-batch bulkWrites + +This test MUST only run against server versions 8.0+. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["bulkWrite"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + in the response. + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +5. Create a list of write models (referred to as `models`) with the following write model repeated + (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + + ```json + InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + } + ``` + +6. Call `bulkWrite` on `client` with `models`. + + - Expect this to fail with a timeout error. + +7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. + +## Unit Tests + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set. +- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a + server has been selected. +- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server + has been selected. +- If a new connection is required to execute an operation, + `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment. +- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should + apply to HTTP requests against OCSP responders. +- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts. +- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment. 
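As a reading aid for the checkout and socket-establishment bullets above, here is a minimal illustrative sketch of how those rules combine. It is not driver source and not part of the spec: the names (`checkoutTimeoutMS`, `socketEstablishmentTimeoutMS`, `CheckoutTimeoutInputs`) are hypothetical, and the remaining "computed server selection timeout" is taken as an input rather than derived here.

```typescript
// Illustrative model only (assumed names, not driver APIs) of the unit-test bullets:
// - waitQueueTimeoutMS is ignored when timeoutMS is set; the remaining budget applies to checkout
// - socket establishment is bounded by min(remaining computedServerSelectionTimeout, connectTimeoutMS)

interface CheckoutTimeoutInputs {
  timeoutMS?: number;         // CSOT budget for the operation, if configured
  waitQueueTimeoutMS: number; // legacy connection-checkout timeout
  elapsedMS: number;          // time already consumed by the operation
}

/** Timeout that should bound connection checkout after a server has been selected. */
function checkoutTimeoutMS({ timeoutMS, waitQueueTimeoutMS, elapsedMS }: CheckoutTimeoutInputs): number {
  if (timeoutMS != null) {
    // timeoutMS is set: waitQueueTimeoutMS is ignored and only the remaining budget applies.
    return Math.max(timeoutMS - elapsedMS, 0);
  }
  // timeoutMS unset: waitQueueTimeoutMS governs checkout.
  return waitQueueTimeoutMS;
}

/** Timeout that should bound establishing a brand-new connection's socket. */
function socketEstablishmentTimeoutMS(
  remainingComputedServerSelectionTimeoutMS: number,
  connectTimeoutMS: number
): number {
  // min(remaining computedServerSelectionTimeout, connectTimeoutMS)
  return Math.min(remainingComputedServerSelectionTimeoutMS, connectTimeoutMS);
}

// Example: a 200ms budget with 150ms already spent leaves ~50ms for checkout,
// and that remainder (not connectTimeoutMS) bounds socket establishment.
console.log(checkoutTimeoutMS({ timeoutMS: 200, waitQueueTimeoutMS: 10_000, elapsedMS: 150 })); // 50
console.log(socketEstablishmentTimeoutMS(50, 10_000)); // 50
```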
diff --git a/test/spec/client-side-operations-timeout/change-streams.json b/test/spec/client-side-operations-timeout/change-streams.json index aef77bb452d..8cffb08e267 100644 --- a/test/spec/client-side-operations-timeout/change-streams.json +++ b/test/spec/client-side-operations-timeout/change-streams.json @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/spec/client-side-operations-timeout/change-streams.yml b/test/spec/client-side-operations-timeout/change-streams.yml index b2a052d01b2..c813be035ac 100644 --- a/test/spec/client-side-operations-timeout/change-streams.yml +++ b/test/spec/client-side-operations-timeout/change-streams.yml @@ -67,12 +67,12 @@ tests: data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 55 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 50 + timeoutMS: 200 expectError: isTimeoutError: true expectEvents: @@ -142,12 +142,12 @@ tests: data: failCommands: ["aggregate", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 maxAwaitTimeMS: 1 saveResultAsEntity: &changeStream changeStream @@ -171,16 +171,16 @@ tests: maxTimeMS: 1 # The timeout should be applied to the entire resume attempt, not individually to each command. The test creates a - # change stream with timeoutMS=20 which returns an empty initial batch and then sets a fail point to block both - # getMore and aggregate for 12ms each and fail with a resumable error. When the resume attempt happens, the getMore - # and aggregate block for longer than 20ms total, so it times out. + # change stream with timeoutMS=200 which returns an empty initial batch and then sets a fail point to block both + # getMore and aggregate for 120ms each and fail with a resumable error. When the resume attempt happens, the getMore + # and aggregate block for longer than 200ms total, so it times out. 
- description: "timeoutMS applies to full resume attempt in a next call" operations: - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - name: failPoint object: testRunner @@ -192,7 +192,7 @@ tests: data: failCommands: ["getMore", "aggregate"] blockConnection: true - blockTimeMS: 12 + blockTimeMS: 120 errorCode: 7 # HostNotFound - resumable but does not require an SDAM state change. # failCommand doesn't correctly add the ResumableChangeStreamError by default. It needs to be specified # manually here so the error is considered resumable. The failGetMoreAfterCursorCheckout fail point @@ -234,9 +234,9 @@ tests: # Specify a short maxAwaitTimeMS because otherwise the getMore on the new cursor will wait for 1000ms and # time out. maxAwaitTimeMS: 1 - timeoutMS: 100 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - # Block getMore for 150ms to force the next() call to time out. + # Block getMore for 250ms to force the next() call to time out. - name: failPoint object: testRunner arguments: @@ -247,7 +247,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 150 + blockTimeMS: 250 # The original aggregate didn't return any events so this should do a getMore and return a timeout error. - name: iterateUntilDocumentOrError object: *changeStream @@ -290,7 +290,7 @@ tests: collection: *collectionName # The timeoutMS value should be refreshed for getMore's. This is a failure test. The createChangeStream operation - # sets timeoutMS=10 and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # sets timeoutMS=200 and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -303,12 +303,12 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 10 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream # The first iteration should do a getMore - name: iterateUntilDocumentOrError diff --git a/test/spec/client-side-operations-timeout/close-cursors.json b/test/spec/client-side-operations-timeout/close-cursors.json index 1361971c4ce..79b0de7b6aa 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.json +++ b/test/spec/client-side-operations-timeout/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/spec/client-side-operations-timeout/close-cursors.yml b/test/spec/client-side-operations-timeout/close-cursors.yml index db26e79ca31..c4c4ea0acda 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.yml +++ 
b/test/spec/client-side-operations-timeout/close-cursors.yml @@ -46,13 +46,13 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 50 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor # Iterate the cursor three times. The third should do a getMore, which should fail with a timeout error. - name: iterateUntilDocumentOrError @@ -99,18 +99,18 @@ tests: data: failCommands: ["killCursors"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor - name: close object: *cursor arguments: - timeoutMS: 40 + timeoutMS: 400 expectEvents: - client: *client events: @@ -120,7 +120,7 @@ tests: commandName: find - commandStartedEvent: command: - killCursors: *collection + killCursors: *collectionName maxTimeMS: { $$type: ["int", "long"] } commandName: killCursors - commandSucceededEvent: diff --git a/test/spec/client-side-operations-timeout/command-execution.json b/test/spec/client-side-operations-timeout/command-execution.json index b9b306c7fb6..aa9c3eb23f3 100644 --- a/test/spec/client-side-operations-timeout/command-execution.json +++ b/test/spec/client-side-operations-timeout/command-execution.json @@ -3,7 +3,7 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "topologies": [ "single", "replicaset", diff --git a/test/spec/client-side-operations-timeout/command-execution.yml b/test/spec/client-side-operations-timeout/command-execution.yml index 400a90867a3..6ba0585b3ca 100644 --- a/test/spec/client-side-operations-timeout/command-execution.yml +++ b/test/spec/client-side-operations-timeout/command-execution.yml @@ -3,9 +3,8 @@ description: "timeoutMS behaves correctly during command execution" schemaVersion: "1.9" runOnRequirements: - # The appName filter cannot be used to set a fail point on connection handshakes until server version 4.9 due to - # SERVER-49220/SERVER-49336. - - minServerVersion: "4.9" + # Require SERVER-49336 for failCommand + appName on the initial handshake. + - minServerVersion: "4.4.7" # Skip load-balanced and serverless which do not support RTT measurements. 
topologies: [ single, replicaset, sharded ] serverless: forbid diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.json b/test/spec/client-side-operations-timeout/convenient-transactions.json index 07e676d5f51..3868b3026c2 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.json +++ b/test/spec/client-side-operations-timeout/convenient-transactions.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.yml b/test/spec/client-side-operations-timeout/convenient-transactions.yml index d79aa4bd058..02d48b83242 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.yml +++ b/test/spec/client-side-operations-timeout/convenient-transactions.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -49,6 +49,8 @@ tests: timeoutMS: 100 expectError: isClientError: true + expectError: + isClientError: true expectEvents: # The only operation run fails with a client-side error, so there should be no events for the client. - client: *client @@ -66,7 +68,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 300 - name: withTransaction object: *session arguments: @@ -88,9 +90,6 @@ tests: expectEvents: - client: *client events: - # Because the second insert expects an error and gets an error, it technically succeeds, so withTransaction - # will try to run commitTransaction. This will fail client-side, though, because the timeout has already - # expired, so no command is sent. 
- commandStartedEvent: commandName: insert databaseName: *databaseName @@ -103,3 +102,9 @@ tests: command: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } diff --git a/test/spec/client-side-operations-timeout/deprecated-options.json b/test/spec/client-side-operations-timeout/deprecated-options.json index 322e9449101..d3e4631ff43 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.json +++ b/test/spec/client-side-operations-timeout/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/spec/client-side-operations-timeout/deprecated-options.yml b/test/spec/client-side-operations-timeout/deprecated-options.yml index 461ba6ab139..582a8983ae2 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.yml +++ b/test/spec/client-side-operations-timeout/deprecated-options.yml @@ -1,4 +1,4 @@ -description: "operations ignore deprected timeout options if timeoutMS is set" +description: "operations ignore deprecated timeout options if timeoutMS is set" schemaVersion: "1.9" diff --git a/test/spec/client-side-operations-timeout/gridfs-advanced.yml b/test/spec/client-side-operations-timeout/gridfs-advanced.yml index bc788bacc35..f6c37e165b2 100644 --- a/test/spec/client-side-operations-timeout/gridfs-advanced.yml +++ b/test/spec/client-side-operations-timeout/gridfs-advanced.yml @@ -119,7 +119,7 @@ tests: update: *filesCollectionName maxTimeMS: { $$type: ["int", "long"] } - # Tests for the "drop" opration. Any tests that might result in multiple commands being sent do not have expectEvents + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents # assertions as these assertions reduce test robustness and can cause flaky failures. 
- description: "timeoutMS can be overridden for drop" diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.json b/test/spec/client-side-operations-timeout/non-tailable-cursors.json index 0a5448a6bb2..291c6e72aa1 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.json +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml index 8cd953dec45..29037b4c0a3 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -53,7 +53,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -86,14 +86,14 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 - # Run a find with timeoutMS=20 and batchSize=1 to force two batches, which will cause a find and a getMore to be - # sent. Both will block for 15ms so together they will go over the timeout. + blockTimeMS: 125 + # Run a find with timeoutMS=200 and batchSize=1 to force two batches, which will cause a find and a getMore to be + # sent. Both will block for 125ms, so together they will go over the timeout. - name: find object: *collection arguments: filter: {} - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -127,13 +127,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: find object: *collection arguments: filter: {} timeoutMode: cursorLifetime - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -168,7 +168,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -187,8 +187,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. 
Neither - # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=20 and both - # "find" and "getMore" commands are blocked for 15ms each. Neither exceeds the timeout, so iteration succeeds. + # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=200 and both + # "find" and "getMore" commands are blocked for 125ms each. Neither exceeds the timeout, so iteration succeeds. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - success" operations: - name: failPoint @@ -201,13 +201,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 125 - name: find object: *collection arguments: filter: {} timeoutMode: iteration - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectEvents: - client: *client @@ -227,8 +227,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither - # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=10 and "getMore" - # commands are blocked for 15ms, causing iteration to fail with a timeout error. + # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=200 and "getMore" + # commands are blocked for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure" operations: - name: failPoint @@ -241,7 +241,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json index a28dbd26854..9daad260ef3 100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json @@ -108,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -198,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -327,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -419,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -546,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -634,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -760,6 +790,11 @@ }, { "description": "operation is retried multiple 
times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -851,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -982,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1075,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1203,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1291,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1417,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1508,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1639,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1732,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1868,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1964,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2095,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2183,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2303,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2390,6 +2495,11 @@ }, 
{ "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2512,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2600,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2730,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2825,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2955,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3043,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3166,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3254,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3377,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3465,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3588,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3676,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3799,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3887,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": 
"failPoint", @@ -4010,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4098,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4218,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4305,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4428,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4517,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4641,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4729,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4852,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4940,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5060,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5147,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5269,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5357,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml index 039f7ca42ef..6f47d6c2e42 
100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml @@ -84,6 +84,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -125,6 +127,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -191,6 +195,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -233,6 +239,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -299,6 +307,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -340,6 +350,8 @@ tests: delete: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -406,6 +418,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -448,6 +462,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -515,6 +531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -557,6 +575,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -623,6 +643,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner 
@@ -664,6 +686,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -730,6 +754,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -772,6 +798,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -839,6 +867,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -881,6 +911,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -949,6 +981,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -992,6 +1026,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1059,6 +1095,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1100,6 +1138,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1163,6 +1203,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1203,6 +1245,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1267,6 +1311,8 @@ tests: expectError: isTimeoutError: true - 
description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1308,6 +1354,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1373,6 +1421,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1414,6 +1464,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1479,6 +1531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1520,6 +1574,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1585,6 +1641,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1626,6 +1684,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1691,6 +1751,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1732,6 +1794,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1797,6 +1861,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1838,6 +1904,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1903,6 +1971,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1944,6 +2014,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2009,6 +2081,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2050,6 +2124,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2113,6 +2189,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2153,6 +2231,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2218,6 +2298,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2260,6 +2342,8 @@ tests: distinct: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2326,6 +2410,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2367,6 +2453,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2432,6 +2520,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: 
testRunner @@ -2473,6 +2563,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2536,6 +2628,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2576,6 +2670,8 @@ tests: listIndexes: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2640,6 +2736,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2681,6 +2779,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.json b/test/spec/client-side-operations-timeout/tailable-awaitData.json index 6da85c77835..535fb692434 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -130,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +188,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +199,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +272,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +283,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +354,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-awaitData.yml index 422c6fb5370..52b9b3b456c 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -83,7 +83,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 300 - name: find object: *collection arguments: @@ -117,13 +117,13 @@ tests: data: failCommands: ["find", "getMore"] 
blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate twice to force a getMore. The first iteration will return the document from the first batch and the @@ -165,13 +165,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 maxAwaitTimeMS: 1 saveResultAsEntity: &tailableCursor tailableCursor @@ -199,8 +199,8 @@ tests: collection: *collectionName maxTimeMS: 1 - # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from - # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=200 from + # the collection and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -213,7 +213,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json index 34ee6609636..e88230e4f7a 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml index 766b46e658b..eb75deaa65c 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -59,7 +59,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -96,13 +96,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailable - timeoutMS: 20 + timeoutMS: 200 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the @@ -131,7 +131,7 @@ tests: 
maxTimeMS: { $$exists: false } # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. The - # find inherits timeoutMS=10 from the collection and the getMore command blocks for 15ms, causing iteration to fail + # find inherits timeoutMS=200 from the collection and the getMore command blocks for 250ms, causing iteration to fail # with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: @@ -145,7 +145,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 5b5b7040698..31414fa4664 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -215,7 +215,8 @@ operations.set('close', async ({ entities, operation }) => { /* eslint-disable no-empty */ try { const cursor = entities.getEntity('cursor', operation.object); - await cursor.close(); + const timeoutMS = operation.arguments?.timeoutMS; + await cursor.close({ timeoutMS }); return; } catch {} @@ -787,7 +788,9 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc const { command, ...opts } = operation.arguments!; const cursor = db.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); diff --git a/test/unit/cursor/aggregation_cursor.test.ts b/test/unit/cursor/aggregation_cursor.test.ts index 32ca4125ff4..82ae18745b0 100644 --- a/test/unit/cursor/aggregation_cursor.test.ts +++ b/test/unit/cursor/aggregation_cursor.test.ts @@ -1,6 +1,12 @@ import { expect } from 'chai'; -import { type AggregationCursor, MongoClient } from '../../mongodb'; +import { + AggregationCursor, + CursorTimeoutMode, + MongoAPIError, + MongoClient, + ns +} from '../../mongodb'; describe('class AggregationCursor', () => { let client: MongoClient; @@ -126,6 +132,38 @@ describe('class AggregationCursor', () => { }); context('when addStage, bespoke stage methods, or array is used to construct pipeline', () => { + context('when CSOT is enabled', () => { + let aggregationCursor: AggregationCursor; + before(function () { + aggregationCursor = client + .db('test') + .collection('test') + .aggregate([], { timeoutMS: 100, timeoutMode: CursorTimeoutMode.ITERATION }); + }); + + context('when a $out stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $out: 'test' }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $merge stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $merge: {} }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $out stage is added with .out()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.out('test'); + }).to.throw(MongoAPIError); + }); + }); + }); + it('sets deeply identical aggregations pipelines', () => { const collection = client.db().collection('test'); @@ -157,4 +195,31 @@ describe('class AggregationCursor', () => { expect(builderGenericStageCursor.pipeline).to.deep.equal(expectedPipeline); }); }); + + 
describe('constructor()', () => { + context('when CSOT is enabled', () => { + context('when timeoutMode=ITERATION and a $out stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $out: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + context('when timeoutMode=ITERATION and a $merge stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $merge: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + }); + }); }); From 738188bd27c1e3c77ea994352d0cdf3c76168ac0 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 12 Sep 2024 15:24:39 -0400 Subject: [PATCH 025/136] fix(NODE-6374): MongoOperationTimeoutError inherits MongoRuntimeError (#4237) --- etc/notes/errors.md | 6 +++++- src/error.ts | 21 ++++++++++++++++++--- test/unit/error.test.ts | 20 ++++++++++++++++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/etc/notes/errors.md b/etc/notes/errors.md index d0f8e6b6e95..114bc1b2e2c 100644 --- a/etc/notes/errors.md +++ b/etc/notes/errors.md @@ -67,7 +67,7 @@ Children of `MongoError` include: ### `MongoDriverError` This class represents errors which originate in the driver itself or when the user incorrectly uses the driver. This class should **never** be directly instantiated. -Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError) and [**`MongoRuntimeError`**](#MongoRuntimeError). +Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError), [**`MongoRuntimeError`**](#MongoRuntimeError) and [**`MongoOperationTimeoutError`**](#MongoOperationTimeoutError). ### `MongoAPIError` @@ -109,6 +109,10 @@ This class should **never** be directly instantiated. | **MongoGridFSChunkError** | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. | | **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. | +### `MongoOperationTimeoutError` + +- TODO(NODE-5688): Add MongoOperationTimeoutError documentation + ### MongoUnexpectedServerResponseError Intended for the scenario where the MongoDB returns an unexpected response in relation to some state the driver is in. diff --git a/src/error.ts b/src/error.ts index 3f803a8c4a7..c8420d30b02 100644 --- a/src/error.ts +++ b/src/error.ts @@ -310,7 +310,7 @@ export class MongoAPIError extends MongoDriverError { /** * An error generated when the driver encounters unexpected input - * or reaches an unexpected/invalid internal state + * or reaches an unexpected/invalid internal state. * * @privateRemarks * Should **never** be directly instantiated. @@ -792,9 +792,24 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } /** - * @internal + * @public + * @category Error + * + * This error is thrown when an operation could not be completed within the specified `timeoutMS`. + * TODO(NODE-5688): expand this documentation. + * + * @example + * ```ts + * try { + * await blogs.insertOne(blogPost, { timeoutMS: 60_000 }) + * } catch (error) { + * if (error instanceof MongoOperationTimeoutError) { + * console.log(`Oh no! 
writer's block!`, error); + * } + * } + * ``` */ -export class MongoOperationTimeoutError extends MongoRuntimeError { +export class MongoOperationTimeoutError extends MongoDriverError { override get name(): string { return 'MongoOperationTimeoutError'; } diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index bdc049cbc4f..dca792bd382 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -14,12 +14,15 @@ import { LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE, LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE, MONGODB_ERROR_CODES, + MongoDriverError, MongoError, MongoErrorLabel, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, + MongoRuntimeError, MongoServerError, MongoSystemError, MongoWriteConcernError, @@ -173,6 +176,23 @@ describe('MongoErrors', () => { }); }); + describe('class MongoOperationTimeoutError', () => { + it('has a name property equal to MongoOperationTimeoutError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.have.property('name', 'MongoOperationTimeoutError'); + }); + + it('is instanceof MongoDriverError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.be.instanceOf(MongoDriverError); + }); + + it('is not instanceof MongoRuntimeError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.not.be.instanceOf(MongoRuntimeError); + }); + }); + describe('MongoMissingDependencyError#constructor', () => { context('when options.cause is set', () => { it('attaches the cause property to the instance', () => { From c4a7c2c15759c0c775b71fa49e7cb3ae981142ab Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 16:02:50 -0400 Subject: [PATCH 026/136] test: remove empty skipped context blocks (#4238) --- .../client-side-operations-timeout/node_csot.test.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index f5ada7eef9f..56127cc8ace 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -576,11 +576,6 @@ describe('CSOT driver tests', metadata, () => { }); }); - describe.skip('Tailable non-awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe.skip('Tailable awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } From 5aa6d4cf019d36fc0f35d445de787c9f65246fba Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Tue, 17 Sep 2024 13:27:43 -0400 Subject: [PATCH 027/136] feat(NODE-5844): add iscryptd to ServerDescription (#4239) --- src/sdam/server_description.ts | 4 ++ .../server_description.test.ts | 56 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 test/integration/server-discovery-and-monitoring/server_description.test.ts diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index cd32f4968b6..d5b67c18080 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -69,6 +69,8 @@ export class ServerDescription { setVersion: number | null; electionId: ObjectId | null; logicalSessionTimeoutMinutes: number | null; + /** Indicates server is a 
mongocryptd instance. */ + iscryptd: boolean; // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level $clusterTime?: ClusterTime; @@ -114,6 +116,7 @@ export class ServerDescription { this.primary = hello?.primary ?? null; this.me = hello?.me?.toLowerCase() ?? null; this.$clusterTime = hello?.$clusterTime ?? null; + this.iscryptd = Boolean(hello?.iscryptd); } get hostAddress(): HostAddress { @@ -167,6 +170,7 @@ export class ServerDescription { return ( other != null && + other.iscryptd === this.iscryptd && errorStrictEqual(this.error, other.error) && this.type === other.type && this.minWireVersion === other.minWireVersion && diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts new file mode 100644 index 00000000000..0a3c7eecbf6 --- /dev/null +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -0,0 +1,56 @@ +import { type ChildProcess, spawn } from 'node:child_process'; + +import { expect } from 'chai'; + +import { MongoClient } from '../../mongodb'; + +describe('class ServerDescription', function () { + describe('when connecting to mongocryptd', { requires: { mongodb: '>=4.4' } }, function () { + let client: MongoClient; + const mongocryptdTestPort = '27022'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}`); + }); + + afterEach(async function () { + await client?.close(); + childProcess.kill('SIGKILL'); + }); + + it('iscryptd is set to true ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.have.property('iscryptd', true); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', true); + }); + }); + + describe('when connecting to anything other than mongocryptd', function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient(); + }); + + afterEach(async function () { + await client?.close(); + }); + + it('iscryptd is set to false ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.not.have.property('iscryptd'); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', false); + }); + }); +}); From 17a2fdece6fb36f5fb2d0155c781780416c27f7e Mon Sep 17 00:00:00 2001 From: Warren James Date: Wed, 25 Sep 2024 17:43:12 -0400 Subject: [PATCH 028/136] chore: allow clientBulkWrite to use TimeoutContext (#4251) --- .../client_bulk_write/client_bulk_write.ts | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/operations/client_bulk_write/client_bulk_write.ts b/src/operations/client_bulk_write/client_bulk_write.ts index cb020bde40c..b7e6529c3fd 100644 --- a/src/operations/client_bulk_write/client_bulk_write.ts +++ b/src/operations/client_bulk_write/client_bulk_write.ts @@ -3,6 +3,7 @@ import { type Document } from 'bson'; import { ClientBulkWriteCursorResponse } 
from '../../cmap/wire_protocol/responses'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { MongoDBNamespace } from '../../utils'; import { CommandOperation } from '../command'; import { Aspect, defineAspects } from '../operation'; @@ -35,9 +36,16 @@ export class ClientBulkWriteOperation extends CommandOperation { - return await super.executeCommand(server, session, this.command, ClientBulkWriteCursorResponse); + return await super.executeCommand( + server, + session, + this.command, + timeoutContext, + ClientBulkWriteCursorResponse + ); } } From 88ca99042f2d2bc5eb0fa7e26029e18bc673f738 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 26 Sep 2024 17:52:59 -0400 Subject: [PATCH 029/136] half testing lint fix prose test 2 --- src/client-side-encryption/state_machine.ts | 7 +-- src/sdam/server.ts | 4 ++ ...ient_side_operations_timeout.prose.test.ts | 18 +++++-- ...lient_side_operations_timeout.unit.test.ts | 53 +++++++++++++----- .../state_machine.test.ts | 54 +++++++++---------- 5 files changed, 89 insertions(+), 47 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 23e83edf83a..ce7bdc483bb 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -14,7 +14,7 @@ import { type ProxyOptions } from '../cmap/connection'; import { getSocks, type SocksLib } from '../deps'; import { MongoOperationTimeoutError } from '../error'; import { type MongoClient, type MongoClientOptions } from '../mongo_client'; -import { type CSOTTimeoutContext, Timeout, type TimeoutContext } from '../timeout'; +import { type CSOTTimeoutContext, Timeout, TimeoutError } from '../timeout'; import { BufferPool, MongoDBCollectionNamespace, promiseWithResolvers } from '../utils'; import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; @@ -455,7 +455,8 @@ export class StateMachine { ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutMS)]) : willResolveKmsRequest); } catch (error) { - if (Timeout.is(error)) throw new MongoOperationTimeoutError('KMS request timed out'); + if (error instanceof TimeoutError) + throw new MongoOperationTimeoutError('KMS request timed out'); throw error; } finally { // There's no need for any more activity on this socket at this point. @@ -595,7 +596,7 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter), { timeoutMS }) + .find(deserialize(filter), { timeoutMS: timeoutMS != null ? 
timeoutMS : undefined }) .toArray(); } } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 20cb13423c4..27f4d0808d7 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,6 +311,10 @@ export class Server extends TypedEventEmitter { delete finalOptions.readPreference; } + if (this.description.iscryptd) { + finalOptions.omitMaxTimeMS = true; + } + const session = finalOptions.session; let conn = session?.pinnedConnection; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 9fa625160f2..a255c52555f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -12,7 +12,8 @@ import { MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, - now + now, + squashError } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -59,7 +60,7 @@ describe('CSOT spec prose tests', function () { */ }); - context.skip( + context( '2. maxTimeMS is not set for commands sent to mongocryptd', { requires: { mongodb: '>=4.2' } }, () => { @@ -83,24 +84,31 @@ describe('CSOT spec prose tests', function () { }); childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); - client = new MongoClient(`mongodb://localhost:23000/?timeoutMS=1000`); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}/?timeoutMS=1000`, { + monitorCommands: true + }); }); afterEach(async function () { - await client?.close(); + await client.close(); childProcess.kill('SIGKILL'); + sinon.restore(); }); it('maxTimeMS is not set', async function () { const commandStarted = []; client.on('commandStarted', ev => commandStarted.push(ev)); - await client.db('admin').command({ ping: 1 }); + await client + .db('admin') + .command({ ping: 1 }) + .catch(e => squashError(e)); expect(commandStarted).to.have.lengthOf(1); expect(commandStarted[0].command).to.not.have.property('maxTimeMS'); }); } ); + // TODO(NODE-6391): Add timeoutMS support to Explicit Encryption context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. 
In these tests, diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 944d9b96048..454f470683f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -6,8 +6,19 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; - -import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; +import { TLSSocket } from 'tls'; + +// eslint-disable-next-line @typescript-eslint/no-restricted-imports +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { + ConnectionPool, + type MongoClient, + MongoOperationTimeoutError, + Timeout, + TimeoutContext, + Topology +} from '../../mongodb'; +import { sleep } from '../../tools/utils'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -93,17 +104,35 @@ describe('CSOT spec unit tests', function () { }).skipReason = 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; - context.skip('Client side encryption', function () { - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); + context('Client side encryption', function () { + it('The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', async function () { + const stateMachine = new StateMachine({} as any); + const request = { + addResponse: _response => {}, + status: { + type: 1, + code: 1, + message: 'notARealStatus' + }, + bytesNeeded: 500, + kmsProvider: 'notRealAgain', + endpoint: 'fake', + message: Buffer.from('foobar') + }; + + const timeoutMS = 100; + sinon.stub(TLSSocket.prototype, 'connect').callsFake(async function (..._args) { + await sleep(200); + return {} as TLSSocket; + }); + const err = await stateMachine.kmsRequest(request, timeoutMS).catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(err.errmsg).to.equal('KMS request timed out'); + }); - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + // TODO(NODE-6390): Add timeoutMS support to Auto Encryption + it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => {}); + }); context.skip('Background Connection Pooling', function () { context( diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index 77f3cf3a824..8245168c720 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -16,40 +16,40 @@ import { Db } from '../../../src/db'; import { MongoClient } from '../../../src/mongo_client'; import { Int32, Long, serialize } from '../../mongodb'; -describe('StateMachine', function () { - class MockRequest implements MongoCryptKMSRequest { - _bytesNeeded: number; - endpoint = 'some.fake.host.com'; - _kmsProvider = 'aws'; - - constructor( - public _message: Buffer, - bytesNeeded - ) { - this._bytesNeeded = typeof bytesNeeded === 'number' ? 
bytesNeeded : 1024; - } +class MockRequest implements MongoCryptKMSRequest { + _bytesNeeded: number; + endpoint = 'some.fake.host.com'; + _kmsProvider = 'aws'; + + constructor( + public _message: Buffer, + bytesNeeded + ) { + this._bytesNeeded = typeof bytesNeeded === 'number' ? bytesNeeded : 1024; + } - get message() { - return this._message; - } + get message() { + return this._message; + } - get bytesNeeded() { - return this._bytesNeeded; - } + get bytesNeeded() { + return this._bytesNeeded; + } - get kmsProvider() { - return this._kmsProvider; - } + get kmsProvider() { + return this._kmsProvider; + } - get status() { - return { type: 1, code: 2, message: 'something went wrong' }; - } + get status() { + return { type: 1, code: 2, message: 'something went wrong' }; + } - addResponse(buffer) { - this._bytesNeeded -= buffer.length; - } + addResponse(buffer) { + this._bytesNeeded -= buffer.length; } +} +describe('StateMachine', function () { describe('#markCommand', function () { let runCommandStub; let dbStub; From 2e3a84c340372f1626651c6ea10f5a0a575a17c0 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 30 Sep 2024 10:49:59 -0400 Subject: [PATCH 030/136] revert state machine test changes --- .../state_machine.test.ts | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index 8245168c720..77f3cf3a824 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -16,40 +16,40 @@ import { Db } from '../../../src/db'; import { MongoClient } from '../../../src/mongo_client'; import { Int32, Long, serialize } from '../../mongodb'; -class MockRequest implements MongoCryptKMSRequest { - _bytesNeeded: number; - endpoint = 'some.fake.host.com'; - _kmsProvider = 'aws'; - - constructor( - public _message: Buffer, - bytesNeeded - ) { - this._bytesNeeded = typeof bytesNeeded === 'number' ? bytesNeeded : 1024; - } +describe('StateMachine', function () { + class MockRequest implements MongoCryptKMSRequest { + _bytesNeeded: number; + endpoint = 'some.fake.host.com'; + _kmsProvider = 'aws'; + + constructor( + public _message: Buffer, + bytesNeeded + ) { + this._bytesNeeded = typeof bytesNeeded === 'number' ? 
bytesNeeded : 1024; + } - get message() { - return this._message; - } + get message() { + return this._message; + } - get bytesNeeded() { - return this._bytesNeeded; - } + get bytesNeeded() { + return this._bytesNeeded; + } - get kmsProvider() { - return this._kmsProvider; - } + get kmsProvider() { + return this._kmsProvider; + } - get status() { - return { type: 1, code: 2, message: 'something went wrong' }; - } + get status() { + return { type: 1, code: 2, message: 'something went wrong' }; + } - addResponse(buffer) { - this._bytesNeeded -= buffer.length; + addResponse(buffer) { + this._bytesNeeded -= buffer.length; + } } -} -describe('StateMachine', function () { describe('#markCommand', function () { let runCommandStub; let dbStub; From 800316376db7eb814b60dfd1c0306e1d86c62fe9 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 11 Apr 2024 17:15:41 -0400 Subject: [PATCH 031/136] feat(NODE-6090): Implement CSOT logic for connection checkout and server selection --- src/admin.ts | 3 +- src/cmap/connection.ts | 4 + src/cmap/connection_pool.ts | 53 ++- src/collection.ts | 5 + src/db.ts | 6 + src/error.ts | 9 + src/index.ts | 1 + src/operations/command.ts | 2 + src/operations/find.ts | 3 +- src/operations/operation.ts | 8 + src/operations/run_command.ts | 9 +- src/sdam/server.ts | 3 +- src/sdam/topology.ts | 54 ++- src/timeout.ts | 14 + src/utils.ts | 10 + ...ient_side_operations_timeout.prose.test.ts | 315 +++++++++++++----- ...lient_side_operations_timeout.unit.test.ts | 140 +++++--- .../node_csot.test.ts | 75 ++++- test/unit/cmap/connection_pool.test.js | 33 +- test/unit/index.test.ts | 1 + 20 files changed, 570 insertions(+), 178 deletions(-) diff --git a/src/admin.ts b/src/admin.ts index a71ac4be1dc..e030384eafc 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -78,7 +78,8 @@ export class Admin { new RunAdminCommandOperation(command, { ...resolveBSONOptions(options), session: options?.session, - readPreference: options?.readPreference + readPreference: options?.readPreference, + timeoutMS: options?.timeoutMS ?? 
this.s.db.timeoutMS }) ); } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 0837c54d3fa..507b95b0b98 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,6 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; +import { type Timeout } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -94,6 +95,9 @@ export interface CommandOptions extends BSONSerializeOptions { writeConcern?: WriteConcern; directConnection?: boolean; + + /** @internal */ + timeout?: Timeout; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5a858a5121e..79440db1e06 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -21,13 +21,14 @@ import { MongoInvalidArgumentError, MongoMissingCredentialsError, MongoNetworkError, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; +import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -102,7 +103,6 @@ export interface ConnectionPoolOptions extends Omit void; reject: (err: AnyError) => void; - timeout: Timeout; [kCancelled]?: boolean; checkoutTime: number; } @@ -355,37 +355,57 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(): Promise { - const checkoutTime = now(); + async checkOut(options?: { timeout?: Timeout }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; + const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; const { promise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(waitQueueTimeoutMS); + let timeout: Timeout | null = null; + if (options?.timeout) { + // CSOT enabled + // Determine if we're using the timeout passed in or a new timeout + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + // This check determines whether or not Topology.selectServer used the configured + // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + // server selection used `timeoutMS`, so we should use the existing timeout as the timeout + // here + timeout = options.timeout; + } else { + // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with + // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut + // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking + timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); + } + } + } else { + timeout = Timeout.expires(waitQueueTimeoutMS); + } const waitQueueMember: WaitQueueMember = { resolve, - reject, - timeout, - checkoutTime + reject }; this[kWaitQueue].push(waitQueueMember); process.nextTick(() => this.processWaitQueue()); try { - return await Promise.race([promise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { waitQueueMember[kCancelled] = true; - waitQueueMember.timeout.clear(); - this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime) @@ -396,9 +416,16 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); + if (options?.timeout) { + throw new MongoOperationTimeoutError('Timed out during connection checkout', { + cause: timeoutError + }); + } throw timeoutError; } throw error; + } finally { + if (timeout !== options?.timeout) timeout?.clear(); } } @@ -764,7 +791,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.reject(error); continue; @@ -785,7 +811,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECKED_OUT, new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.resolve(connection); @@ -828,8 +853,6 @@ export class ConnectionPool extends TypedEventEmitter { ); waitQueueMember.resolve(connection); } - - waitQueueMember.timeout.clear(); } process.nextTick(() => this.processWaitQueue()); }); diff --git a/src/collection.ts b/src/collection.ts index ccc6fe2da65..dbd91371cce 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -262,6 +262,11 @@ export class Collection { this.s.collectionHint = normalizeHintField(v); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options.timeoutMS; + } + /** * Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field, * one will be added to each of the documents missing it by the driver, mutating the document. This behavior diff --git a/src/db.ts b/src/db.ts index 53c18e44af6..6e1aa194acf 100644 --- a/src/db.ts +++ b/src/db.ts @@ -222,6 +222,11 @@ export class Db { return this.s.namespace.toString(); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options?.timeoutMS; + } + /** * Create a new collection on a server with the specified options. Use this to create capped collections. 
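
The checkout branch above leans on the `csotMin` helper (imported at the top of `connection_pool.ts` and defined in the `src/utils.ts` hunk later in this patch), which takes the minimum of two durations while treating `0` as "no limit". A minimal sketch of that rule, using only behaviour shown in these hunks, to make the "which deadline governs connection checkout" decision concrete:

```ts
// csotMin as added to src/utils.ts in this patch: a duration of 0 means "no limit".
function csotMin(duration1: number, duration2: number): number {
  if (duration1 === 0) return duration2;
  if (duration2 === 0) return duration1;
  return Math.min(duration1, duration2);
}

csotMin(10_000, 30_000); // 10000 -> timeoutMS was the tighter bound, so checkOut reuses the existing Timeout
csotMin(0, 30_000);      // 30000 -> no operation timeout; serverSelectionTimeoutMS governs
csotMin(15_000, 0);      // 15000 -> serverSelectionTimeoutMS of 0 means wait indefinitely
```
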
* More information about command options available at https://www.mongodb.com/docs/manual/reference/command/create/ @@ -272,6 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/error.ts b/src/error.ts index 4aed6b93146..bd78e8883b2 100644 --- a/src/error.ts +++ b/src/error.ts @@ -815,6 +815,15 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } } +/** + * @internal + */ +export class MongoOperationTimeoutError extends MongoRuntimeError { + override get name(): string { + return 'MongoOperationTimeoutError'; + } +} + /** * An error thrown when the user attempts to add options to a cursor that has already been * initialized diff --git a/src/index.ts b/src/index.ts index 97f964ce546..c990afbb46f 100644 --- a/src/index.ts +++ b/src/index.ts @@ -65,6 +65,7 @@ export { MongoNetworkTimeoutError, MongoNotConnectedError, MongoOIDCError, + MongoOperationTimeoutError, MongoParseError, MongoRuntimeError, MongoServerClosedError, diff --git a/src/operations/command.ts b/src/operations/command.ts index 94ccc6ceafe..c64b4ae963a 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -65,6 +65,7 @@ export interface OperationParent { writeConcern?: WriteConcern; readPreference?: ReadPreference; bsonOptions?: BSONSerializeOptions; + timeoutMS?: number; } /** @internal */ @@ -131,6 +132,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, + timeout: this.timeout, readPreference: this.readPreference, session }; diff --git a/src/operations/find.ts b/src/operations/find.ts index a040af73bc6..0f81f2d61f2 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -116,7 +116,8 @@ export class FindOperation extends CommandOperation { ...this.options, ...this.bsonOptions, documentsReturnedIn: 'firstBatch', - session + session, + timeout: this.timeout }, this.explain ? ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/operation.ts b/src/operations/operation.ts index b51cca40201..0599b72b96d 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,6 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type Timeout } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -56,6 +57,11 @@ export abstract class AbstractOperation { options: OperationOptions; + /** @internal */ + timeout?: Timeout; + /** @internal */ + timeoutMS?: number; + [kSession]: ClientSession | undefined; static aspects?: Set; @@ -73,6 +79,8 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; + + this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index ad7d02c044f..56462fa8843 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -14,6 +14,8 @@ export type RunCommandOptions = { session?: ClientSession; /** The read preference */ readPreference?: ReadPreferenceLike; + /** @internal */ + timeoutMS?: number; } & BSONSerializeOptions; /** @internal */ @@ -39,10 +41,12 @@ export class RunCommandOperation extends AbstractOperation { { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }, this.options.responseType ); + return res; } } @@ -68,7 +72,8 @@ export class RunAdminCommandOperation extends AbstractOperation const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }); return res; } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 4c1d37519ad..3d2a3ca1a31 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,7 +311,7 @@ export class Server extends TypedEventEmitter { this.incrementOperationCount(); if (conn == null) { try { - conn = await this.pool.checkOut(); + conn = await this.pool.checkOut(options); if (this.loadBalanced && isPinnableCommand(cmd, session)) { session?.pin(conn); } @@ -336,6 +336,7 @@ export class Server extends TypedEventEmitter { operationError.code === MONGODB_ERROR_CODES.Reauthenticate ) { await this.pool.reauthenticate(conn); + // TODO(NODE-5682): Implement CSOT support for socket read/write at the connection layer try { const res = await conn.command(ns, cmd, finalOptions, responseType); throwIfWriteConcernError(res); diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 73b0e92a09a..4c9d71d807d 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -24,6 +24,7 @@ import { type MongoDriverError, MongoError, MongoErrorLabel, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerSelectionError, MongoTopologyClosedError @@ -37,6 +38,7 @@ import { Timeout, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, + csotMin, type EventEmitterWithState, HostAddress, List, @@ -107,7 +109,6 @@ export interface ServerSelectionRequest { resolve: (server: Server) => void; reject: (error: MongoError) => void; [kCancelled]?: boolean; - timeout: Timeout; operationName: string; waitingLogged: boolean; previousServer?: ServerDescription; @@ -457,8 +458,14 @@ export class Topology extends TypedEventEmitter { } } + const timeoutMS = this.client.options.timeoutMS; + const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const selectServerOptions = { operationName: 'ping', ...options }; + const selectServerOptions = { + operationName: 'ping', + timeout, + ...options + }; try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), @@ -467,7 +474,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, {}); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -556,6 +563,25 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } + const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 0; + let timeout: Timeout | null; + if (options.timeout) { + // CSOT Enabled + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + timeout = options.timeout; + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } + } else { + timeout = null; + } + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } const isSharded = this.description.type === TopologyType.Sharded; const session = options.session; @@ -578,11 +604,12 @@ export class Topology extends TypedEventEmitter { ) ); } + if (timeout !== options.timeout) timeout?.clear(); return transaction.server; } const { promise: serverPromise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); + const waitQueueMember: ServerSelectionRequest = { serverSelector, topologyDescription: this.description, @@ -590,7 +617,6 @@ export class Topology extends TypedEventEmitter { transaction, resolve, reject, - timeout, startTime: now(), operationName: options.operationName, waitingLogged: false, @@ -601,14 +627,14 @@ export class Topology extends TypedEventEmitter { processWaitQueue(this); try { - return await Promise.race([serverPromise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([serverPromise, timeout]) : serverPromise); } catch (error) { if (TimeoutError.is(error)) { // Timeout waitQueueMember[kCancelled] = true; - timeout.clear(); const timeoutError = new MongoServerSelectionError( - `Server selection timed out after ${options.serverSelectionTimeoutMS} ms`, + `Server selection timed out after ${timeout?.duration} ms`, this.description ); if ( @@ -628,10 +654,17 @@ export class Topology extends TypedEventEmitter { ); } + if (options.timeout) { + throw new MongoOperationTimeoutError('Timed out during server selection', { + cause: timeoutError + }); + } throw timeoutError; } // Other server selection error throw error; + } finally { + if (timeout !== options.timeout) timeout?.clear(); } } /** @@ -889,8 +922,6 @@ function drainWaitQueue(queue: List, drainError: MongoDr continue; } - waitQueueMember.timeout.clear(); - if (!waitQueueMember[kCancelled]) { if ( waitQueueMember.mongoLogger?.willLog( @@ -944,7 +975,6 @@ function processWaitQueue(topology: Topology) { ) : serverDescriptions; } catch (selectorError) { - waitQueueMember.timeout.clear(); if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, @@ -1032,8 +1062,6 @@ function processWaitQueue(topology: Topology) { transaction.pinServer(selectedServer); } - waitQueueMember.timeout.clear(); - if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, diff --git a/src/timeout.ts b/src/timeout.ts index cd48ec385da..7af1a23f261 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -40,6 +40,16 @@ export class Timeout extends Promise { public duration: number; public timedOut = false; + get remainingTime(): number { + if (this.timedOut) return 0; + if (this.duration === 0) return Infinity; + return this.start + this.duration - Math.trunc(performance.now()); + } + + get timeElapsed(): number { + return Math.trunc(performance.now()) - this.start; + } + /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = false) { let reject!: Reject; @@ -78,6 +88,10 @@ export class Timeout extends Promise { this.id = undefined; } + throwIfExpired(): void { + if (this.timedOut) throw new TimeoutError('Timed out'); + } + public static expires(durationMS: number, unref?: boolean): Timeout { return new Timeout(undefined, durationMS, unref); } diff --git a/src/utils.ts b/src/utils.ts index 5ad754c9321..ebc0784cb1f 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -544,6 +544,10 @@ export function resolveOptions( result.readPreference = readPreference; } + const timeoutMS = options?.timeoutMS; + + result.timeoutMS = timeoutMS ?? 
parent?.timeoutMS; + return result; } @@ -1379,6 +1383,12 @@ export async function fileIsAccessible(fileName: string, mode?: number) { } } +export function csotMin(duration1: number, duration2: number): number { + if (duration1 === 0) return duration2; + if (duration2 === 0) return duration1; + return Math.min(duration1, duration2); +} + export function noop() { return; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1ed88f34d86..903ea9c3bb4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,8 +1,30 @@ /* Specification prose tests */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { + MongoClient, + MongoOperationTimeoutError, + MongoServerSelectionError, + now +} from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec prose tests', () => { - context('1. Multi-batch writes', () => { +describe('CSOT spec prose tests', function () { + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + }); + + afterEach(async function () { + await internalClient?.close(); + await client?.close(); + }); + + context.skip('1. Multi-batch writes', () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -31,7 +53,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('2. maxTimeMS is not set for commands sent to mongocryptd', () => { + context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { /** * This test MUST only be run against enterprise server versions 4.2 and higher. * @@ -42,7 +64,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('3. ClientEncryption', () => { + context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, * `LOCAL_MASTERKEY` refers to the following base64: @@ -132,7 +154,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('4. Background Connection Pooling', () => { + context.skip('4. Background Connection Pooling', () => { /** * The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication * fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait @@ -192,7 +214,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('5. Blocking Iteration Methods', () => { + context.skip('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -251,7 +273,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('6. GridFS - Upload', () => { + context.skip('6. GridFS - Upload', () => { /** Tests in this section MUST only be run against server versions 4.4 and higher. 
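
The `remainingTime` and `timeElapsed` accessors added to `Timeout` in the `src/timeout.ts` hunk above are what let `ConnectionPool.checkOut` budget whatever time server selection left over. A rough sketch of how they behave, assuming `start` is captured from `performance.now()` when the timeout is created:

```ts
const timeout = Timeout.expires(1_000); // duration = 1000, start = trunc(performance.now())

// roughly 250ms later:
timeout.timeElapsed;   // ≈ 250  (now - start)
timeout.remainingTime; // ≈ 750  (start + duration - now)

Timeout.expires(0).remainingTime; // Infinity -- a duration of 0 is treated as "no deadline"

timeout.throwIfExpired(); // no-op until the 1000ms window passes, then throws TimeoutError
```
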
*/ context('uploads via openUploadStream can be timed out', () => { @@ -306,7 +328,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('7. GridFS - Download', () => { + context.skip('7. GridFS - Download', () => { /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -351,96 +373,225 @@ describe.skip('CSOT spec prose tests', () => { }); context('8. Server Selection', () => { - context('serverSelectionTimeoutMS honored if timeoutMS is not set', () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. - * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. - */ - }); + context('using sinon timer', function () { + let clock: sinon.SinonFakeTimers; + + beforeEach(function () { + clock = sinon.useFakeTimers(); + }); + + afterEach(function () { + clock.restore(); + }); - context( - "timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", - () => { + it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. + * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ - } - ); - context( - "serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", - () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. + /** NOTE: This is the original implementation of this test, but it was flaky, so was + * replaced by the current implementation using sinon fake timers + * ```ts + * client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + * const admin = client.db('test').admin(); + * const start = performance.now(); + * const maybeError = await admin.ping().then( + * () => null, + * e => e + * ); + * const end = performance.now(); + * + * expect(maybeError).to.be.instanceof(MongoServerSelectionError); + * expect(end - start).to.be.lte(15) + * ``` */ - } - ); + client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + const admin = client.db('test').admin(); + const maybeError = admin.ping().then( + () => null, + e => e + ); + + await clock.tickAsync(11); + expect(await maybeError).to.be.instanceof(MongoServerSelectionError); + }); + }); + + it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. 
+ */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); + + it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); - context('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', () => { + it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ + client = new MongoClient('mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); }); - context( - "timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. 
+ * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); - context( - "serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 20, + timeoutMS: 10 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + it.skip("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); + + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 10, + timeoutMS: 20 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context('9. endSession', () => { + context.skip('9. endSession', () => { /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -472,7 +623,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('10. 
Convenient Transactions', () => { + context.skip('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index cf9c5f736ff..c1426d8db1d 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -1,51 +1,105 @@ -/* eslint-disable @typescript-eslint/no-empty-function */ /** * The following tests are described in CSOTs spec prose tests as "unit" tests * The tests enumerated in this section could not be expressed in either spec or prose format. * Drivers SHOULD implement these if it is possible to do so using the driver's existing test infrastructure. */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec unit tests', () => { - context('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', () => {}); - - context( - 'If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', - () => {} - ); - - context( - 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', - () => {} - ); - - context( - 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - - context( - 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', - () => {} - ); +describe('CSOT spec unit tests', function () { + let client: MongoClient; + + afterEach(async function () { + sinon.restore(); + await client?.close(); + }); + + context('Server Selection and Connection Checkout', function () { + it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); + sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(Timeout.expires).to.have.been.calledWith(10000); + expect(Timeout.expires).to.not.have.been.calledWith(999999); + }); + + it('If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ timeoutMS: 
1000 }); + // Spy on connection checkout and pull options argument + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(checkoutSpy).to.have.been.calledOnce; + expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + // Check that we passed through the timeout + expect(checkoutSpy.firstCall.args[0].timeout).to.equal( + selectServerSpy.lastCall.lastArg.timeout + ); + + // Check that no more Timeouts are constructed after we enter checkout + expect(!expiresSpy.calledAfter(checkoutSpy)); + }); + + it('If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 123456 }); + + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + expect(checkoutSpy).to.have.been.calledAfter(selectServerSpy); + + expect(expiresSpy).to.have.been.calledWith(123456); + }); + + /* eslint-disable @typescript-eslint/no-empty-function */ + context.skip( + 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', + () => {} + ).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + context( + 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', + () => {} + ); + }); + + context.skip('Socket timeouts', function () { + context( + 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', + () => {} + ); + }).skipReason = + 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; + + context.skip('Client side encryption', function () { + context( + 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', + () => {} + ); + + context( + 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', + () => {} + ); + }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + + context.skip('Background Connection Pooling', function () { + context( + 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', + () => {} + ); + }).skipReason = 'TODO(NODE-6091): Implement CSOT logic for Background Connection Pooling'; + /* eslint-enable @typescript-eslint/no-empty-function */ }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b6a936afbb9..5636eb00db7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -7,7 +7,9 @@ import { type Collection, type Db, type FindCursor, - type MongoClient + LEGACY_HELLO_COMMAND, + type MongoClient, + MongoOperationTimeoutError } from '../../mongodb'; describe('CSOT driver tests', () => { @@ -94,4 +96,75 @@ 
describe('CSOT driver tests', () => { }); }); }); + + describe('autoconnect', () => { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + client = undefined; + }); + + describe('when failing autoconnect with timeoutMS defined', () => { + let configClient: MongoClient; + + beforeEach(async function () { + configClient = this.configuration.newClient(); + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + }); + + afterEach(async function () { + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + await configClient.close(); + }); + + it('throws a MongoOperationTimeoutError', { + metadata: { requires: { mongodb: '>=4.4' } }, + test: async function () { + const commandsStarted = []; + client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + + client.on('commandStarted', ev => commandsStarted.push(ev)); + + const maybeError = await client + .db('test') + .collection('test') + .insertOne({ a: 19 }) + .then( + () => null, + e => e + ); + + expect(maybeError).to.exist; + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + + expect(commandsStarted).to.have.length(0); // Ensure that we fail before we start the insertOne + } + }); + }); + }); }); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 69102e1f150..18048befab4 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -5,7 +5,7 @@ const { WaitQueueTimeoutError } = require('../../mongodb'); const mock = require('../../tools/mongodb-mock/index'); const sinon = require('sinon'); const { expect } = require('chai'); -const { setImmediate } = require('timers'); +const { setImmediate } = require('timers/promises'); const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); @@ -26,6 +26,9 @@ describe('Connection Pool', function () { options: { extendedMetadata: {} } + }, + s: { + serverSelectionTimeoutMS: 0 } } }; @@ -98,7 +101,7 @@ describe('Connection Pool', function () { pool.checkIn(conn); }); - it('should clear timed out wait queue members if no connections are available', function (done) { + it('should clear timed out wait queue members if no connections are available', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; if (isHello(doc)) { @@ -114,23 +117,15 @@ describe('Connection Pool', function () { pool.ready(); - pool.checkOut().then(conn => { - expect(conn).to.exist; - pool.checkOut().then(expect.fail, err => { - expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); - - // We can only process the wait queue with `checkIn` and `checkOut`, so we - // force the pool here to think there are no available connections, even though - // we are checking the connection back in. 
This simulates a slow leak where - // incoming requests outpace the ability of the queue to fully process cancelled - // wait queue members - sinon.stub(pool, 'availableConnectionCount').get(() => 0); - pool.checkIn(conn); - - setImmediate(() => expect(pool).property('waitQueueSize').to.equal(0)); - done(); - }); - }, expect.fail); + const conn = await pool.checkOut(); + const err = await pool.checkOut().catch(e => e); + expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); + sinon.stub(pool, 'availableConnectionCount').get(() => 0); + pool.checkIn(conn); + + await setImmediate(); + + expect(pool).property('waitQueueSize').to.equal(0); }); describe('minPoolSize population', function () { diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index 883cc4b4ba7..56fe7aebaa6 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -109,6 +109,7 @@ const EXPECTED_EXPORTS = [ 'MongoTailableCursorError', 'MongoTopologyClosedError', 'MongoTransactionError', + 'MongoOperationTimeoutError', 'MongoUnexpectedServerResponseError', 'MongoWriteConcernError', 'WriteConcernErrorResult', From a216ae6ce20d8938649f9f71e0746a90b01c945f Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 10 Jun 2024 10:46:02 -0400 Subject: [PATCH 032/136] test(NODE-6120): Implement Unified test runner changes for CSOT (#4121) --- test/spec/unified-test-format/Makefile | 37 +++++- .../collectionData-createOptions.yml | 7 +- .../valid-pass/createEntities-operation.json | 74 ++++++++++++ .../valid-pass/createEntities-operation.yml | 38 ++++++ .../valid-pass/entity-cursor-iterateOnce.json | 111 ++++++++++++++++++ .../valid-pass/entity-cursor-iterateOnce.yml | 59 ++++++++++ .../valid-pass/entity-find-cursor.json | 15 ++- .../valid-pass/entity-find-cursor.yml | 6 +- ...ectedEventsForClient-ignoreExtraEvents.yml | 2 +- .../valid-pass/matches-lte-operator.json | 78 ++++++++++++ .../valid-pass/matches-lte-operator.yml | 41 +++++++ .../valid-pass/poc-change-streams.json | 36 ++++++ .../valid-pass/poc-change-streams.yml | 18 +++ .../valid-pass/poc-crud.json | 2 +- .../valid-pass/poc-crud.yml | 2 +- .../valid-pass/poc-sessions.json | 2 +- .../valid-pass/poc-sessions.yml | 3 +- .../poc-transactions-convenient-api.json | 2 +- .../poc-transactions-convenient-api.yml | 2 +- .../poc-transactions-mongos-pin-auto.json | 2 +- .../poc-transactions-mongos-pin-auto.yml | 2 +- .../valid-pass/poc-transactions.json | 6 +- .../valid-pass/poc-transactions.yml | 6 +- test/tools/unified-spec-runner/match.ts | 32 ++++- test/tools/unified-spec-runner/schema.ts | 1 + 25 files changed, 547 insertions(+), 37 deletions(-) create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.json create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.yml create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.json create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.yml diff --git a/test/spec/unified-test-format/Makefile b/test/spec/unified-test-format/Makefile index 9711d9eee0e..a2b79e3f70b 100644 --- a/test/spec/unified-test-format/Makefile +++ b/test/spec/unified-test-format/Makefile @@ -1,8 +1,8 @@ -SCHEMA=../schema-1.5.json +SCHEMA=../schema-1.21.json -.PHONY: all invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud 
collection-management sessions command-monitoring HAS_AJV +.PHONY: all invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout HAS_AJV -all: invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud collection-management sessions command-monitoring +all: invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api change-streams crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout client-side-encryption invalid: HAS_AJV @# Redirect stdout to hide expected validation errors @@ -14,6 +14,9 @@ valid-fail: HAS_AJV valid-pass: HAS_AJV @ajv test -s $(SCHEMA) -d "valid-pass/*.yml" --valid +atlas-data-lake: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../atlas-data-lake-testing/tests/unified/*.yml" --valid + versioned-api: HAS_AJV @ajv test -s $(SCHEMA) -d "../../versioned-api/tests/*.yml" --valid @@ -26,17 +29,39 @@ gridfs: HAS_AJV transactions: HAS_AJV @ajv test -s $(SCHEMA) -d "../../transactions/tests/unified/*.yml" --valid +transactions-convenient-api: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../transactions-convenient-api/tests/unified/*.yml" --valid + +change-streams: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../change-streams/tests/unified/*.yml" --valid + +client-side-operations-timeout: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-operations-timeout/tests/*.yml" --valid + crud: HAS_AJV @ajv test -s $(SCHEMA) -d "../../crud/tests/unified/*.yml" --valid collection-management: HAS_AJV @ajv test -s $(SCHEMA) -d "../../collection-management/tests/*.yml" --valid +read-write-concern: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../read-write-concern/tests/operation/*.yml" --valid + +retryable-reads: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-reads/tests/unified/*.yml" --valid + +retryable-writes: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-writes/tests/unified/*.yml" --valid + sessions: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../sessions/tests/unified/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../sessions/tests/*.yml" --valid + +command-logging-and-monitoring: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/logging/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/monitoring/*.yml" --valid -command-monitoring: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../command-monitoring/tests/unified/*.yml" --valid +client-side-encryption: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-encryption/tests/unified/*.yml" --valid HAS_AJV: @if ! command -v ajv > /dev/null; then \ diff --git a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml index 3b1c0c3a412..c6afedcfa96 100644 --- a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml +++ b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml @@ -1,12 +1,9 @@ description: collectionData-createOptions - schemaVersion: "1.9" - runOnRequirements: - minServerVersion: "3.6" # Capped collections cannot be created on serverless instances. 
serverless: forbid - createEntities: - client: id: &client0 client0 @@ -18,7 +15,6 @@ createEntities: id: &collection0 collection0 database: *database0 collectionName: &collection0Name coll0 - initialData: - collectionName: *collection0Name databaseName: *database0Name @@ -28,7 +24,6 @@ initialData: size: &cappedSize 4096 documents: - { _id: 1, x: 11 } - tests: - description: collection is created with the correct options operations: @@ -39,4 +34,4 @@ tests: - $collStats: { storageStats: {} } - $project: { capped: '$storageStats.capped', maxSize: '$storageStats.maxSize'} expectResult: - - { capped: true, maxSize: *cappedSize } + - { capped: true, maxSize: *cappedSize } \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.json b/test/spec/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 00000000000..3fde42919d7 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.yml b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml new file mode 100644 index 00000000000..ee8acd73687 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml @@ -0,0 +1,38 @@ +description: createEntities-operation + +# Note: createEntities is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +tests: + - description: createEntities operation + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client1 client1 + observeEvents: [ commandStartedEvent ] + - database: + id: &database1 database1 + client: *client1 + databaseName: &database1Name database1 + - collection: + id: &collection1 collection1 + database: *database1 + collectionName: &collection1Name coll1 + - name: deleteOne + object: *collection1 + arguments: + filter: { _id : 1 } + expectEvents: + - client: *client1 + events: + - commandStartedEvent: + command: + delete: *collection1Name + deletes: + - q: { _id: 1 } + limit: 1 + commandName: delete + databaseName: *database1Name diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 00000000000..b17ae78b942 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,111 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml new file mode 100644 index 00000000000..508e594a538 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml @@ -0,0 +1,59 @@ +description: entity-cursor-iterateOnce + +# Note: iterateOnce is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - _id: 1 + - _id: 2 + - _id: 3 + +tests: + - description: iterateOnce + operations: + - name: createFindCursor + object: *collection0 + arguments: + filter: {} + batchSize: 2 + saveResultAsEntity: &cursor0 cursor0 + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 1 } + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 2 } + # This operation could be iterateUntilDocumentOrError, but we use iterateOne to ensure that drivers support it. + - name: iterateOnce + object: *cursor0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: {} + batchSize: 2 + commandName: find + databaseName: *database0Name + - commandStartedEvent: + command: + getMore: { $$type: [ int, long ] } + collection: *collection0Name + commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f3..6f955d81f4a 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json @@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml index 61c9f8835ac..3ecdf6da1df 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml @@ -61,19 +61,19 @@ tests: - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } firstBatch: { $$type: array } commandName: find - commandStartedEvent: command: - getMore: { $$type: long } + getMore: { $$type: [ int, long ] } collection: *collection0Name commandName: getMore - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } nextBatch: { $$type: array } commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml index 162d0e3c046..d6d87094f64 100644 --- a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml +++ b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml @@ -75,4 +75,4 @@ tests: insert: *collection0Name documents: - *insertDocument4 - commandName: insert + commandName: insert \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.json 
b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 00000000000..4de65c58387 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml new file mode 100644 index 00000000000..4bec571f029 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml @@ -0,0 +1,41 @@ +description: matches-lte-operator + +# Note: $$lte is not technically in the 1.8 schema but was introduced at the same time. +schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0Name + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +tests: + - description: special lte matching operator + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id : 1, y: 1 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection0Name + documents: + # We can make exact assertions here but we use the $$lte operator to ensure drivers support it. 
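                # ($$lte passes when the actual value is a number less than or equal to the
                #  operand; the runner-side check that backs this assertion is added later in
                #  this series, in test/tools/unified-spec-runner/match.ts.)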
+ - { _id: { $$lte: 1 }, y: { $$lte: 2 } } + commandName: insert + databaseName: *database0Name diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.json b/test/spec/unified-test-format/valid-pass/poc-change-streams.json index 4194005eb41..50f0d06f08d 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.json @@ -94,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml index b066cf0b89a..a7daafceb77 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml @@ -59,6 +59,24 @@ initialData: documents: [] tests: + - description: "saveResultAsEntity is optional for createChangeStream" + runOnRequirements: + - minServerVersion: "3.8.0" + topologies: [ replicaset ] + operations: + - name: createChangeStream + object: *client0 + arguments: + pipeline: [] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + aggregate: 1 + commandName: aggregate + databaseName: admin + - description: "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster." 
runOnRequirements: - minServerVersion: "3.8.0" diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.json b/test/spec/unified-test-format/valid-pass/poc-crud.json index 0790d9b789f..94e4ec56829 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.json +++ b/test/spec/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.yml b/test/spec/unified-test-format/valid-pass/poc-crud.yml index b7d05d75afb..5748c0779f8 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.yml +++ b/test/spec/unified-test-format/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: aggregate diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.json b/test/spec/unified-test-format/valid-pass/poc-sessions.json index 75f34894286..117c9e7d009 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.json +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.yml b/test/spec/unified-test-format/valid-pass/poc-sessions.yml index cb16657da3f..20902583286 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.yml @@ -124,12 +124,11 @@ tests: - description: "Dirty explicit session is discarded" # Original test specified retryWrites=true, but that is now the default. - # Retryable writes will require a sharded-replicaset, though. 
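    # (The unified test format has deprecated the sharded-replicaset topology name in favor of
    #  plain sharded, which is why this file and the other poc-* tests in this series swap one
    #  name for the other in their runOnRequirements.)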
runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed659276..9ab44a9c548 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml index 4f981d15dd4..94fadda0aa5 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a5..de08edec442 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml index 47db7c3188a..33cd2a25214 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.json b/test/spec/unified-test-format/valid-pass/poc-transactions.json index 0355ca20605..2055a3b7057 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.yml b/test/spec/unified-test-format/valid-pass/poc-transactions.yml index 0a66b9bd7f6..8a12c8b39ac 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: @@ -51,7 +51,7 @@ tests: - description: "explicitly create 
collection using create command" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 @@ -109,7 +109,7 @@ tests: - description: "create index on a non-existing collection" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index f92004c7760..7b2668e88a0 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -24,6 +24,7 @@ import { Long, MongoBulkWriteError, MongoError, + MongoOperationTimeoutError, MongoServerError, ObjectId, type OneOrMore, @@ -97,6 +98,19 @@ export function isMatchAsRootOperator(value: unknown): value is MatchAsRootOpera return typeof value === 'object' && value != null && '$$matchAsRoot' in value; } +export interface LteOperator { + $$lte: number; +} + +export function isLteOperator(value: unknown): value is LteOperator { + return ( + typeof value === 'object' && + value != null && + '$$lte' in value && + typeof value['$$lte'] === 'number' + ); +} + export const SpecialOperatorKeys = [ '$$exists', '$$type', @@ -105,7 +119,8 @@ export const SpecialOperatorKeys = [ '$$matchAsRoot', '$$matchAsDocument', '$$unsetOrMatches', - '$$sessionLsid' + '$$sessionLsid', + '$$lte' ]; export type SpecialOperator = @@ -116,7 +131,8 @@ export type SpecialOperator = | UnsetOrMatchesOperator | SessionLsidOperator | MatchAsDocumentOperator - | MatchAsRootOperator; + | MatchAsRootOperator + | LteOperator; type KeysOfUnion = T extends object ? keyof T : never; export type SpecialOperatorKey = KeysOfUnion; @@ -129,7 +145,8 @@ export function isSpecialOperator(value: unknown): value is SpecialOperator { isUnsetOrMatchesOperator(value) || isSessionLsidOperator(value) || isMatchAsRootOperator(value) || - isMatchAsDocumentOperator(value) + isMatchAsDocumentOperator(value) || + isLteOperator(value) ); } @@ -389,6 +406,9 @@ export function specialCheck( ); resultCheck(actual, expected.$$matchAsRoot as any, entities, path, false); + } else if (isLteOperator(expected)) { + expect(typeof actual).to.equal('number'); + expect(actual).to.be.lte(expected.$$lte); } else { expect.fail(`Unknown special operator: ${JSON.stringify(expected)}`); } @@ -758,6 +778,12 @@ export function expectErrorCheck( } } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/schema.ts b/test/tools/unified-spec-runner/schema.ts index 81b81724632..ce722b2e706 100644 --- a/test/tools/unified-spec-runner/schema.ts +++ b/test/tools/unified-spec-runner/schema.ts @@ -386,6 +386,7 @@ export interface StoreEventsAsEntity { } export interface ExpectedError { isError?: true; + isTimeoutError?: boolean; isClientError?: boolean; errorContains?: string; errorCode?: number; From aca96617c94a4a0f09f7800c9ec902b8b2a84b68 Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 21 Jun 2024 12:06:30 -0400 Subject: [PATCH 033/136] refactor(NODE-6187): refactor to use TimeoutContext 
abstraction (#4131) --- src/bulk/common.ts | 4 + src/cmap/connection.ts | 4 +- src/cmap/connection_pool.ts | 39 +--- src/index.ts | 18 +- src/operations/aggregate.ts | 5 +- src/operations/bulk_write.ts | 11 +- src/operations/command.ts | 8 +- src/operations/count.ts | 9 +- src/operations/create_collection.ts | 18 +- src/operations/delete.ts | 21 +- src/operations/distinct.ts | 9 +- src/operations/drop.ts | 24 ++- src/operations/estimated_document_count.ts | 9 +- src/operations/execute_operation.ts | 16 +- src/operations/find.ts | 6 +- src/operations/find_and_modify.ts | 9 +- src/operations/get_more.ts | 5 +- src/operations/indexes.ts | 22 +- src/operations/insert.ts | 19 +- src/operations/kill_cursors.ts | 12 +- src/operations/list_collections.ts | 5 +- src/operations/list_databases.ts | 11 +- src/operations/operation.ts | 10 +- src/operations/profiling_level.ts | 9 +- src/operations/remove_user.ts | 9 +- src/operations/rename.ts | 9 +- src/operations/run_command.ts | 17 +- src/operations/search_indexes/create.ts | 12 +- src/operations/search_indexes/drop.ts | 9 +- src/operations/search_indexes/update.ts | 9 +- src/operations/set_profiling_level.ts | 6 +- src/operations/stats.ts | 9 +- src/operations/update.ts | 24 ++- src/operations/validate_collection.ts | 9 +- src/sdam/server.ts | 12 +- src/sdam/topology.ts | 55 +++-- src/timeout.ts | 166 +++++++++++++- ...lient_side_operations_timeout.unit.test.ts | 12 +- .../node_csot.test.ts | 2 +- test/tools/cmap_spec_runner.ts | 12 +- test/unit/cmap/connection_pool.test.js | 22 +- test/unit/error.test.ts | 19 +- test/unit/operations/get_more.test.ts | 2 +- test/unit/sdam/topology.test.ts | 76 +++++-- test/unit/timeout.test.ts | 204 +++++++++++++++++- 45 files changed, 796 insertions(+), 202 deletions(-) diff --git a/src/bulk/common.ts b/src/bulk/common.ts index c133a57d227..9eb63382443 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -20,6 +20,7 @@ import { makeUpdateStatement, UpdateOperation, type UpdateStatement } from '../o import type { Server } from '../sdam/server'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { applyRetryableWrites, type Callback, @@ -873,6 +874,9 @@ export interface BulkWriteOptions extends CommandOperationOptions { forceServerObjectId?: boolean; /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). 
*/ let?: Document; + + /** @internal */ + timeoutContext?: TimeoutContext; } const executeCommandsAsync = promisify(executeCommands); diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 507b95b0b98..f7bb1789b7c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,7 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type TimeoutContext } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -97,7 +97,7 @@ export interface CommandOptions extends BSONSerializeOptions { directConnection?: boolean; /** @internal */ - timeout?: Timeout; + timeoutContext?: TimeoutContext; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 79440db1e06..5369cc155aa 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -27,8 +27,8 @@ import { } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; -import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type TimeoutContext, TimeoutError } from '../timeout'; +import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -355,41 +355,15 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(options?: { timeout?: Timeout }): Promise { + async checkOut(options: { timeoutContext: TimeoutContext }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); - const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; - const { promise, resolve, reject } = promiseWithResolvers(); - let timeout: Timeout | null = null; - if (options?.timeout) { - // CSOT enabled - // Determine if we're using the timeout passed in or a new timeout - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - // This check determines whether or not Topology.selectServer used the configured - // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - // server selection used `timeoutMS`, so we should use the existing timeout as the timeout - // here - timeout = options.timeout; - } else { - // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with - // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut - // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking - timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); - } - } - } else { - timeout = Timeout.expires(waitQueueTimeoutMS); - } + const timeout = options.timeoutContext.connectionCheckoutTimeout; const waitQueueMember: WaitQueueMember = { resolve, @@ -404,6 +378,7 @@ export class ConnectionPool extends TypedEventEmitter { return await (timeout ? Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { + timeout?.clear(); waitQueueMember[kCancelled] = true; this.emitAndLog( @@ -416,7 +391,7 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); - if (options?.timeout) { + if (options.timeoutContext.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during connection checkout', { cause: timeoutError }); @@ -425,7 +400,7 @@ export class ConnectionPool extends TypedEventEmitter { } throw error; } finally { - if (timeout !== options?.timeout) timeout?.clear(); + if (options.timeoutContext.clearConnectionCheckoutTimeout) timeout?.clear(); } } diff --git a/src/index.ts b/src/index.ts index c990afbb46f..218c5f9e4cf 100644 --- a/src/index.ts +++ b/src/index.ts @@ -564,7 +564,13 @@ export type { RTTSampler, ServerMonitoringMode } from './sdam/monitor'; -export type { Server, ServerEvents, ServerOptions, ServerPrivate } from './sdam/server'; +export type { + Server, + ServerCommandOptions, + ServerEvents, + ServerOptions, + ServerPrivate +} from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, @@ -595,7 +601,15 @@ export type { WithTransactionCallback } from './sessions'; export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort'; -export type { Timeout } from './timeout'; +export type { + CSOTTimeoutContext, + CSOTTimeoutContextOptions, + LegacyTimeoutContext, + LegacyTimeoutContextOptions, + Timeout, + TimeoutContext, + TimeoutContextOptions +} from './timeout'; export type { Transaction, TransactionOptions, TxnState } from './transactions'; export type { BufferPool, diff --git a/src/operations/aggregate.ts 
b/src/operations/aggregate.ts index a5a267ac3e4..50494cbba73 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -3,6 +3,7 @@ import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/r import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -97,7 +98,8 @@ export class AggregateOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options: AggregateOptions = this.options; const serverWireVersion = maxWireVersion(server); @@ -142,6 +144,7 @@ export class AggregateOperation extends CommandOperation { server, session, command, + timeoutContext, this.explain ? ExplainedCursorResponse : CursorResponse ); } diff --git a/src/operations/bulk_write.ts b/src/operations/bulk_write.ts index 0a855644f06..55b61ef73b0 100644 --- a/src/operations/bulk_write.ts +++ b/src/operations/bulk_write.ts @@ -7,6 +7,7 @@ import type { import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { AbstractOperation, Aspect, defineAspects } from './operation'; /** @internal */ @@ -32,11 +33,17 @@ export class BulkWriteOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const operations = this.operations; - const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; + const options = { + ...this.options, + ...this.bsonOptions, + readPreference: this.readPreference, + timeoutContext + }; // Create the bulk operation const bulk: BulkOperationBase = diff --git a/src/operations/command.ts b/src/operations/command.ts index c64b4ae963a..5bd80f796d1 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -7,6 +7,7 @@ import type { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { commandSupportsReadConcern, decorateWithExplain, @@ -112,19 +113,22 @@ export abstract class CommandOperation extends AbstractOperation { server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType: T | undefined ): Promise>; public async executeCommand( server: Server, session: ClientSession | undefined, - cmd: Document + cmd: Document, + timeoutContext: TimeoutContext ): Promise; async executeCommand( server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType?: MongoDBResponseConstructor ): Promise { this.server = server; @@ -132,7 +136,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, - timeout: this.timeout, + timeoutContext, 
readPreference: this.readPreference, session }; diff --git a/src/operations/count.ts b/src/operations/count.ts index 00aae501728..82330a11e76 100644 --- a/src/operations/count.ts +++ b/src/operations/count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -36,7 +37,11 @@ export class CountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const cmd: Document = { count: this.collectionName, @@ -59,7 +64,7 @@ export class CountOperation extends CommandOperation { cmd.maxTimeMS = options.maxTimeMS; } - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return result ? result.n : 0; } } diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index 8edc7e9a1c4..afb2680b9a0 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -9,6 +9,7 @@ import { MongoCompatibilityError } from '../error'; import type { PkFactory } from '../mongo_client'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { CreateIndexesOperation } from './indexes'; import { Aspect, defineAspects } from './operation'; @@ -124,7 +125,11 @@ export class CreateCollectionOperation extends CommandOperation { return 'create' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const name = this.name; const options = this.options; @@ -155,7 +160,7 @@ export class CreateCollectionOperation extends CommandOperation { unique: true } }); - await createOp.executeWithoutEncryptedFieldsCheck(server, session); + await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } if (!options.encryptedFields) { @@ -163,7 +168,7 @@ export class CreateCollectionOperation extends CommandOperation { } } - const coll = await this.executeWithoutEncryptedFieldsCheck(server, session); + const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); if (encryptedFields) { // Create the required index for queryable encryption support. 
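The TimeoutContext type referenced throughout this commit is defined in src/timeout.ts, which this excerpt does not show. The sketch below is a rough reconstruction of the shape implied by its call sites in this patch (TimeoutContext.create in execute_operation.ts; csotEnabled(), connectionCheckoutTimeout and clearConnectionCheckoutTimeout in connection_pool.ts). All *Sketch names and the expires() helper are illustrative stand-ins, not the driver's actual API.

// Illustrative only: a simplified stand-in for the driver's TimeoutContext,
// reconstructed from how this patch uses it. The real src/timeout.ts differs in detail.

/** Promise-like timeout handle, standing in for the driver's Timeout class. */
type TimeoutHandle = Promise<never> & { clear(): void };

function expires(durationMS: number): TimeoutHandle {
  let timer: ReturnType<typeof setTimeout> | undefined;
  const promise = new Promise<never>((_resolve, reject) => {
    timer = setTimeout(() => reject(new Error(`timed out after ${durationMS}ms`)), durationMS);
  }) as TimeoutHandle;
  promise.clear = () => {
    if (timer !== undefined) clearTimeout(timer);
  };
  return promise;
}

interface TimeoutContextOptionsSketch {
  timeoutMS?: number;
  serverSelectionTimeoutMS: number;
  waitQueueTimeoutMS: number;
}

abstract class TimeoutContextSketch {
  /** executeOperation creates one context per operation and threads it through execute(). */
  static create(options: TimeoutContextOptionsSketch): TimeoutContextSketch {
    return options.timeoutMS != null
      ? new CsotTimeoutContextSketch(options)
      : new LegacyTimeoutContextSketch(options);
  }

  abstract csotEnabled(): boolean;
  /** Timeout that bounds connection checkout, or null when nothing applies. */
  abstract get connectionCheckoutTimeout(): TimeoutHandle | null;
  /** Whether checkOut() owns the timeout above and should clear it when done. */
  abstract get clearConnectionCheckoutTimeout(): boolean;
}

class CsotTimeoutContextSketch extends TimeoutContextSketch {
  constructor(private readonly options: TimeoutContextOptionsSketch) {
    super();
  }
  override csotEnabled(): boolean {
    return true;
  }
  // Simplified: under CSOT the checkout timeout is derived from the operation-wide
  // timeoutMS deadline, so checkOut() does not clear it; later stages still need it.
  override get connectionCheckoutTimeout(): TimeoutHandle | null {
    return expires(this.options.timeoutMS ?? 0);
  }
  override get clearConnectionCheckoutTimeout(): boolean {
    return false;
  }
}

class LegacyTimeoutContextSketch extends TimeoutContextSketch {
  constructor(private readonly options: TimeoutContextOptionsSketch) {
    super();
  }
  override csotEnabled(): boolean {
    return false;
  }
  // Without CSOT, checkout falls back to waitQueueTimeoutMS and owns its own timer.
  override get connectionCheckoutTimeout(): TimeoutHandle | null {
    return this.options.waitQueueTimeoutMS > 0 ? expires(this.options.waitQueueTimeoutMS) : null;
  }
  override get clearConnectionCheckoutTimeout(): boolean {
    return true;
  }
}

With a shape like this, checkOut() no longer needs the inline CSOT branching from the earlier commit: it awaits options.timeoutContext.connectionCheckoutTimeout and clears it only when clearConnectionCheckoutTimeout is true, as the connection_pool.ts hunk above shows.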
@@ -173,7 +178,7 @@ export class CreateCollectionOperation extends CommandOperation { { __safeContent__: 1 }, {} ); - await createIndexOp.execute(server, session); + await createIndexOp.execute(server, session, timeoutContext); } return coll; @@ -181,7 +186,8 @@ export class CreateCollectionOperation extends CommandOperation { private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const db = this.db; const name = this.name; @@ -198,7 +204,7 @@ export class CreateCollectionOperation extends CommandOperation { } } // otherwise just execute the command - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); return new Collection(db, name, options); } } diff --git a/src/operations/delete.ts b/src/operations/delete.ts index f0ef61cb7b1..0e93ead36a2 100644 --- a/src/operations/delete.ts +++ b/src/operations/delete.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoServerError } from '../error'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace } from '../utils'; import { type WriteConcernOptions } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -67,7 +68,8 @@ export class DeleteOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; @@ -95,7 +97,12 @@ export class DeleteOperation extends CommandOperation { } } - const res: TODO_NODE_3286 = await super.executeCommand(server, session, command); + const res: TODO_NODE_3286 = await super.executeCommand( + server, + session, + command, + timeoutContext + ); return res; } } @@ -107,9 +114,10 @@ export class DeleteOneOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -127,9 +135,10 @@ export class DeleteManyOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/distinct.ts b/src/operations/distinct.ts index 4fda285d880..51f2a362d8c 100644 --- a/src/operations/distinct.ts +++ b/src/operations/distinct.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, decorateWithReadConcern } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -42,7 +43,11 @@ export class DistinctOperation extends CommandOperation { return 'distinct' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const key = this.key; const query = this.query; @@ -72,7 +77,7 @@ export class DistinctOperation extends CommandOperation { // Have we specified collation decorateWithCollation(cmd, coll, options); - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return this.explain ? 
result : result.values; } diff --git a/src/operations/drop.ts b/src/operations/drop.ts index 15624d4c07b..787bb6e7d0f 100644 --- a/src/operations/drop.ts +++ b/src/operations/drop.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { MONGODB_ERROR_CODES, MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class DropCollectionOperation extends CommandOperation { return 'drop' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const options = this.options; const name = this.name; @@ -57,7 +62,7 @@ export class DropCollectionOperation extends CommandOperation { // Drop auxilliary collections, ignoring potential NamespaceNotFound errors. const dropOp = new DropCollectionOperation(db, collectionName); try { - await dropOp.executeWithoutEncryptedFieldsCheck(server, session); + await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } catch (err) { if ( !(err instanceof MongoServerError) || @@ -69,14 +74,15 @@ export class DropCollectionOperation extends CommandOperation { } } - return await this.executeWithoutEncryptedFieldsCheck(server, session); + return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - await super.executeCommand(server, session, { drop: this.name }); + await super.executeCommand(server, session, { drop: this.name }, timeoutContext); return true; } } @@ -96,8 +102,12 @@ export class DropDatabaseOperation extends CommandOperation { return 'dropDatabase' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropDatabase: 1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext); return true; } } diff --git a/src/operations/estimated_document_count.ts b/src/operations/estimated_document_count.ts index c1d6c381998..5ab5aa4c305 100644 --- a/src/operations/estimated_document_count.ts +++ b/src/operations/estimated_document_count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -30,7 +31,11 @@ export class EstimatedDocumentCountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd: Document = { count: this.collectionName }; if (typeof 
this.options.maxTimeMS === 'number') { @@ -43,7 +48,7 @@ export class EstimatedDocumentCountOperation extends CommandOperation { cmd.comment = this.options.comment; } - const response = await super.executeCommand(server, session, cmd); + const response = await super.executeCommand(server, session, cmd, timeoutContext); return response?.n || 0; } diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 97e60450739..39937c8abf4 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -24,7 +24,8 @@ import { } from '../sdam/server_selection'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; -import { supportsRetryableWrites } from '../utils'; +import { TimeoutContext } from '../timeout'; +import { squashError, supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -57,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -86,6 +87,12 @@ export async function executeOperation< ); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + const readPreference = operation.readPreference ?? ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -109,7 +116,8 @@ export async function executeOperation< return await tryOperation(operation, { topology, session, - readPreference + readPreference, + timeoutContext }); } finally { if (session?.owner != null && session.owner === owner) { @@ -260,7 +268,7 @@ async function tryOperation< } try { - return await operation.execute(server, session); + return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; diff --git a/src/operations/find.ts b/src/operations/find.ts index 0f81f2d61f2..5f359324d56 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -5,6 +5,7 @@ import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithExplain, type MongoDBNamespace, normalizeHintField } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -98,7 +99,8 @@ export class FindOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { this.server = server; @@ -117,7 +119,7 @@ export class FindOperation extends CommandOperation { ...this.bsonOptions, documentsReturnedIn: 'firstBatch', session, - timeout: this.timeout + timeoutContext }, this.explain ? 
ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/find_and_modify.ts b/src/operations/find_and_modify.ts index 92b17e93b3b..651bcccb626 100644 --- a/src/operations/find_and_modify.ts +++ b/src/operations/find_and_modify.ts @@ -5,6 +5,7 @@ import { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortForCmd } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils'; import { type WriteConcern, type WriteConcernSettings } from '../write_concern'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -180,7 +181,11 @@ export class FindAndModifyOperation extends CommandOperation { return 'findAndModify' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const query = this.query; const options = { ...this.options, ...this.bsonOptions }; @@ -208,7 +213,7 @@ export class FindAndModifyOperation extends CommandOperation { } // Execute the command - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return options.includeResultMetadata ? result : (result.value ?? null); } } diff --git a/src/operations/get_more.ts b/src/operations/get_more.ts index aa550721b6f..34317d533b5 100644 --- a/src/operations/get_more.ts +++ b/src/operations/get_more.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -58,7 +59,8 @@ export class GetMoreOperation extends AbstractOperation { */ override async execute( server: Server, - _session: ClientSession | undefined + _session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Getmore must run on the same server operation began on'); @@ -97,6 +99,7 @@ export class GetMoreOperation extends AbstractOperation { const commandOptions = { returnFieldSelector: null, documentsReturnedIn: 'nextBatch', + timeoutContext, ...this.options }; diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index fda3fa80dd6..c96a5d73453 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -6,6 +6,7 @@ import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils'; import { type CollationOptions, @@ -296,7 +297,11 @@ export class CreateIndexesOperation extends CommandOperation { return 'createIndexes'; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + 
timeoutContext: TimeoutContext + ): Promise { const options = this.options; const indexes = this.indexes; @@ -316,7 +321,7 @@ export class CreateIndexesOperation extends CommandOperation { // collation is set on each index, it should not be defined at the root this.options.collation = undefined; - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); const indexNames = indexes.map(index => index.name || ''); return indexNames; @@ -344,9 +349,13 @@ export class DropIndexOperation extends CommandOperation { return 'dropIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName }; - return await super.executeCommand(server, session, cmd); + return await super.executeCommand(server, session, cmd, timeoutContext); } } @@ -379,7 +388,8 @@ export class ListIndexesOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const serverWireVersion = maxWireVersion(server); @@ -393,7 +403,7 @@ export class ListIndexesOperation extends CommandOperation { command.comment = this.options.comment; } - return await super.executeCommand(server, session, command, CursorResponse); + return await super.executeCommand(server, session, command, timeoutContext, CursorResponse); } } diff --git a/src/operations/insert.ts b/src/operations/insert.ts index 35a050ed1ca..1a40763e313 100644 --- a/src/operations/insert.ts +++ b/src/operations/insert.ts @@ -5,6 +5,7 @@ import { MongoInvalidArgumentError, MongoServerError } from '../error'; import type { InferIdType } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { BulkWriteOperation } from './bulk_write'; @@ -27,7 +28,11 @@ export class InsertOperation extends CommandOperation { return 'insert' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -46,7 +51,7 @@ export class InsertOperation extends CommandOperation { command.comment = options.comment; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } @@ -73,9 +78,10 @@ export class InsertOneOperation extends InsertOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res = await super.execute(server, session); + const res = await super.execute(server, session, timeoutContext); if (res.code) throw new MongoServerError(res); if (res.writeErrors) { // This should be a WriteError but we can't change it now because of error hierarchy @@ -123,7 +129,8 @@ export class InsertManyOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; @@ -137,7 +144,7 @@ export class InsertManyOperation extends AbstractOperation { ); try { - const res = await bulkWriteOperation.execute(server, session); + const res = await bulkWriteOperation.execute(server, session, timeoutContext); return { acknowledged: writeConcern?.w !== 0, insertedCount: res.insertedCount, diff --git a/src/operations/kill_cursors.ts b/src/operations/kill_cursors.ts index 356230e9c7a..72c6a04b276 100644 --- a/src/operations/kill_cursors.ts +++ b/src/operations/kill_cursors.ts @@ -2,6 +2,7 @@ import type { Long } from '../bson'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -29,7 +30,11 @@ export class KillCursorsOperation extends AbstractOperation { return 'killCursors' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Killcursor must run on the same server operation began on'); } @@ -46,7 +51,10 @@ export class KillCursorsOperation extends AbstractOperation { cursors: [this.cursorId] }; try { - await server.command(this.ns, killCursorsCommand, { session }); + await server.command(this.ns, killCursorsCommand, { + session, + timeoutContext + }); } catch (error) { // The driver should never emit errors from killCursors, this is spec-ed behavior squashError(error); diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index e94300f1205..702db0fe3f2 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, 
defineAspects } from './operation'; @@ -54,12 +55,14 @@ export class ListCollectionsOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { return await super.executeCommand( server, session, this.generateCommand(maxWireVersion(server)), + timeoutContext, CursorResponse ); } diff --git a/src/operations/list_databases.ts b/src/operations/list_databases.ts index 5ad9142a1a7..bd740d50c68 100644 --- a/src/operations/list_databases.ts +++ b/src/operations/list_databases.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -41,7 +42,8 @@ export class ListDatabasesOperation extends CommandOperation { const cmd: Document = { listDatabases: 1 }; @@ -63,7 +65,12 @@ export class ListDatabasesOperation extends CommandOperation); + return await (super.executeCommand( + server, + session, + cmd, + timeoutContext + ) as Promise); } } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 0599b72b96d..97e12871ee2 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,7 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type Timeout, type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -79,15 +79,17 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; - - this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
Command name should be stateless (should not use 'this' keyword) */ abstract get commandName(): string; - abstract execute(server: Server, session: ClientSession | undefined): Promise; + abstract execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise; hasAspect(aspect: symbol): boolean { const ctor = this.constructor as { aspects?: Set }; diff --git a/src/operations/profiling_level.ts b/src/operations/profiling_level.ts index 383062c2a40..7c860a244b7 100644 --- a/src/operations/profiling_level.ts +++ b/src/operations/profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -20,8 +21,12 @@ export class ProfilingLevelOperation extends CommandOperation { return 'profile' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - const doc = await super.executeCommand(server, session, { profile: -1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + const doc = await super.executeCommand(server, session, { profile: -1 }, timeoutContext); if (doc.ok === 1) { const was = doc.was; if (was === 0) return 'off'; diff --git a/src/operations/remove_user.ts b/src/operations/remove_user.ts index ced8e4e1cab..7f484ba89a3 100644 --- a/src/operations/remove_user.ts +++ b/src/operations/remove_user.ts @@ -1,6 +1,7 @@ import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -22,8 +23,12 @@ export class RemoveUserOperation extends CommandOperation { return 'dropUser' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropUser: this.username }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropUser: this.username }, timeoutContext); return true; } } diff --git a/src/operations/rename.ts b/src/operations/rename.ts index a27d4afe45a..883be282b64 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class RenameOperation extends CommandOperation { return 'renameCollection' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { // Build the command const renameCollection = this.collection.namespace; const toCollection = 
this.collection.s.namespace.withCollection(this.newName).toString(); @@ -42,7 +47,7 @@ export class RenameOperation extends CommandOperation { dropTarget: dropTarget }; - await super.executeCommand(server, session, command); + await super.executeCommand(server, session, command, timeoutContext); return new Collection(this.collection.s.db, this.newName, this.collection.s.options); } } diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index 56462fa8843..b91e2d0344e 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -5,6 +5,7 @@ import { type TODO_NODE_3286 } from '../mongo_types'; import type { ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { AbstractOperation } from './operation'; @@ -33,7 +34,11 @@ export class RunCommandOperation extends AbstractOperation { return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command( this.ns, @@ -42,7 +47,7 @@ export class RunCommandOperation extends AbstractOperation { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }, this.options.responseType ); @@ -67,13 +72,17 @@ export class RunAdminCommandOperation extends AbstractOperation return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }); return res; } diff --git a/src/operations/search_indexes/create.ts b/src/operations/search_indexes/create.ts index 7e5e55d18d6..9661026e3eb 100644 --- a/src/operations/search_indexes/create.ts +++ b/src/operations/search_indexes/create.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @@ -32,14 +33,21 @@ export class CreateSearchIndexesOperation extends AbstractOperation { return 'createSearchIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { createSearchIndexes: namespace.collection, indexes: this.descriptions }; - const res = await server.command(namespace, command, { session }); + const res = await server.command(namespace, command, { + session, + timeoutContext + }); const indexesCreated: Array<{ name: string }> = res?.indexesCreated ?? 
[]; return indexesCreated.map(({ name }) => name); diff --git a/src/operations/search_indexes/drop.ts b/src/operations/search_indexes/drop.ts index 4e287cca012..e9ea0ad01ce 100644 --- a/src/operations/search_indexes/drop.ts +++ b/src/operations/search_indexes/drop.ts @@ -4,6 +4,7 @@ import type { Collection } from '../../collection'; import { MONGODB_ERROR_CODES, MongoServerError } from '../../error'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class DropSearchIndexOperation extends AbstractOperation { return 'dropSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command: Document = { @@ -31,7 +36,7 @@ export class DropSearchIndexOperation extends AbstractOperation { } try { - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); } catch (error) { const isNamespaceNotFoundError = error instanceof MongoServerError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound; diff --git a/src/operations/search_indexes/update.ts b/src/operations/search_indexes/update.ts index aad7f93536c..e88e777d675 100644 --- a/src/operations/search_indexes/update.ts +++ b/src/operations/search_indexes/update.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class UpdateSearchIndexOperation extends AbstractOperation { return 'updateSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { updateSearchIndex: namespace.collection, @@ -27,7 +32,7 @@ export class UpdateSearchIndexOperation extends AbstractOperation { definition: this.definition }; - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); return; } } diff --git a/src/operations/set_profiling_level.ts b/src/operations/set_profiling_level.ts index 9969b2ea3c2..d76473f2632 100644 --- a/src/operations/set_profiling_level.ts +++ b/src/operations/set_profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { enumToString } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -53,7 +54,8 @@ export class SetProfilingLevelOperation extends CommandOperation override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const level = this.level; @@ -64,7 +66,7 @@ export 
class SetProfilingLevelOperation extends CommandOperation } // TODO(NODE-3483): Determine error to put here - await super.executeCommand(server, session, { profile: this.profile }); + await super.executeCommand(server, session, { profile: this.profile }, timeoutContext); return level; } } diff --git a/src/operations/stats.ts b/src/operations/stats.ts index 41c9faf6e24..aafd3bf1bac 100644 --- a/src/operations/stats.ts +++ b/src/operations/stats.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -24,13 +25,17 @@ export class DbStatsOperation extends CommandOperation { return 'dbStats' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const command: Document = { dbStats: true }; if (this.options.scale != null) { command.scale = this.options.scale; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } diff --git a/src/operations/update.ts b/src/operations/update.ts index ba0ad6d95ff..5b6f396afec 100644 --- a/src/operations/update.ts +++ b/src/operations/update.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoInvalidArgumentError, MongoServerError } import type { InferIdType, TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { hasAtomicOperators, type MongoDBNamespace } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -91,7 +92,11 @@ export class UpdateOperation extends CommandOperation { return this.statements.every(op => op.multi == null || op.multi === false); } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -122,7 +127,7 @@ export class UpdateOperation extends CommandOperation { } } - const res = await super.executeCommand(server, session, command); + const res = await super.executeCommand(server, session, command, timeoutContext); return res; } } @@ -143,9 +148,10 @@ export class UpdateOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -177,9 +183,10 @@ export class UpdateManyOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -230,9 +237,10 @@ export class ReplaceOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/validate_collection.ts b/src/operations/validate_collection.ts index 4880a703a7a..16ae4cad9e0 100644 --- a/src/operations/validate_collection.ts +++ b/src/operations/validate_collection.ts @@ -3,6 +3,7 @@ import type { Document } from '../bson'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -37,10 +38,14 @@ export class ValidateCollectionOperation extends CommandOperation { return 'validate' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const collectionName = this.collectionName; - const doc = await super.executeCommand(server, session, this.command); + const doc = await super.executeCommand(server, session, this.command, timeoutContext); if (doc.result != null && typeof doc.result !== 'string') throw new MongoUnexpectedServerResponseError('Error with validation data'); if (doc.result != null && doc.result.match(/exception|corrupt/) != null) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 3d2a3ca1a31..08325086d53 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -40,6 +40,7 @@ import type { ServerApi } from '../mongo_client'; import { TypedEventEmitter } from '../mongo_types'; import type { GetMoreOptions 
} from '../operations/get_more'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isTransactionCommand } from '../transactions'; import { type EventEmitterWithState, @@ -104,6 +105,11 @@ export type ServerEvents = { } & ConnectionPoolEvents & EventEmitterWithState; +/** @internal */ +export type ServerCommandOptions = Omit & { + timeoutContext: TimeoutContext; +}; + /** @internal */ export class Server extends TypedEventEmitter { /** @internal */ @@ -267,20 +273,20 @@ export class Server extends TypedEventEmitter { public async command( ns: MongoDBNamespace, command: Document, - options: CommandOptions | undefined, + options: ServerCommandOptions, responseType: T | undefined ): Promise>; public async command( ns: MongoDBNamespace, command: Document, - options?: CommandOptions + options: ServerCommandOptions ): Promise; public async command( ns: MongoDBNamespace, cmd: Document, - options: CommandOptions, + options: ServerCommandOptions, responseType?: MongoDBResponseConstructor ): Promise { if (ns.db == null || typeof ns === 'string') { diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 4c9d71d807d..6117b5317cd 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -34,11 +34,10 @@ import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mong import { TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; -import { Timeout, TimeoutError } from '../timeout'; +import { Timeout, TimeoutContext, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, - csotMin, type EventEmitterWithState, HostAddress, List, @@ -179,8 +178,11 @@ export interface SelectServerOptions { session?: ClientSession; operationName: string; previousServer?: ServerDescription; - /** @internal*/ - timeout?: Timeout; + /** + * @internal + * TODO(NODE-5685): Make this required + * */ + timeoutContext?: TimeoutContext; } /** @public */ @@ -458,13 +460,20 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.options.timeoutMS; - const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; + const timeoutMS = this.client.s.options.timeoutMS; + const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? ReadPreference.primary; + + const timeoutContext = TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS, + waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS + }); + const selectServerOptions = { operationName: 'ping', - timeout, - ...options + ...options, + timeoutContext }; try { const server = await this.selectServer( @@ -474,7 +483,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -563,24 +572,10 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } - const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 
0; - let timeout: Timeout | null; - if (options.timeout) { - // CSOT Enabled - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - timeout = options.timeout; - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); - } - } else { - timeout = null; - } - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); + let timeout; + if (options.timeoutContext) timeout = options.timeoutContext.serverSelectionTimeout; + else { + timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); } const isSharded = this.description.type === TopologyType.Sharded; @@ -604,7 +599,7 @@ export class Topology extends TypedEventEmitter { ) ); } - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); return transaction.server; } @@ -654,7 +649,7 @@ export class Topology extends TypedEventEmitter { ); } - if (options.timeout) { + if (options.timeoutContext?.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during server selection', { cause: timeoutError }); @@ -664,7 +659,7 @@ export class Topology extends TypedEventEmitter { // Other server selection error throw error; } finally { - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); } } /** diff --git a/src/timeout.ts b/src/timeout.ts index 7af1a23f261..3d65992a02b 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,7 +1,7 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError } from './error'; -import { noop } from './utils'; +import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { @@ -107,3 +107,165 @@ export class Timeout extends Promise { ); } } + +/** @internal */ +export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; + +/** @internal */ +export type LegacyTimeoutContextOptions = { + serverSelectionTimeoutMS: number; + waitQueueTimeoutMS: number; + socketTimeoutMS?: number; +}; + +/** @internal */ +export type CSOTTimeoutContextOptions = { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; +}; + +function isLegacyTimeoutContextOptions(v: unknown): v is LegacyTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'waitQueueTimeoutMS' in v && + typeof v.waitQueueTimeoutMS === 'number' + ); +} + +function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'timeoutMS' in v && + typeof v.timeoutMS === 'number' + ); +} + +/** @internal */ +export abstract class TimeoutContext { + static create(options: TimeoutContextOptions): TimeoutContext { + if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); + else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); + else throw new MongoRuntimeError('Unrecognized options'); + } + + abstract get serverSelectionTimeout(): Timeout | null; + + abstract get connectionCheckoutTimeout(): Timeout | null; + + 
abstract get clearServerSelectionTimeout(): boolean; + + abstract get clearConnectionCheckoutTimeout(): boolean; + + abstract csotEnabled(): this is CSOTTimeoutContext; +} + +/** @internal */ +export class CSOTTimeoutContext extends TimeoutContext { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; + + clearConnectionCheckoutTimeout: boolean; + clearServerSelectionTimeout: boolean; + + private _maxTimeMS?: number; + + private _serverSelectionTimeout?: Timeout | null; + private _connectionCheckoutTimeout?: Timeout | null; + + constructor(options: CSOTTimeoutContextOptions) { + super(); + this.timeoutMS = options.timeoutMS; + + this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; + + this.socketTimeoutMS = options.socketTimeoutMS; + + this.clearServerSelectionTimeout = false; + this.clearConnectionCheckoutTimeout = true; + } + + get maxTimeMS(): number { + return this._maxTimeMS ?? -1; + } + + set maxTimeMS(v: number) { + this._maxTimeMS = v; + } + + csotEnabled(): this is CSOTTimeoutContext { + return true; + } + + get serverSelectionTimeout(): Timeout | null { + // check for undefined + if (typeof this._serverSelectionTimeout !== 'object') { + const usingServerSelectionTimeoutMS = + this.serverSelectionTimeoutMS !== 0 && + csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; + + if (usingServerSelectionTimeoutMS) { + this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + } else { + if (this.timeoutMS > 0) { + this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + } else { + this._serverSelectionTimeout = null; + } + } + } + + return this._serverSelectionTimeout; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (typeof this._connectionCheckoutTimeout !== 'object') { + if (typeof this._serverSelectionTimeout === 'object') { + // null or Timeout + this._connectionCheckoutTimeout = this._serverSelectionTimeout; + } else { + throw new MongoRuntimeError( + 'Unreachable. 
If you are seeing this error, please file a ticket on the NODE driver project on Jira' + ); + } + } + return this._connectionCheckoutTimeout; + } +} + +/** @internal */ +export class LegacyTimeoutContext extends TimeoutContext { + options: LegacyTimeoutContextOptions; + clearServerSelectionTimeout: boolean; + clearConnectionCheckoutTimeout: boolean; + + constructor(options: LegacyTimeoutContextOptions) { + super(); + this.options = options; + this.clearServerSelectionTimeout = true; + this.clearConnectionCheckoutTimeout = true; + } + + csotEnabled(): this is CSOTTimeoutContext { + return false; + } + + get serverSelectionTimeout(): Timeout | null { + if (this.options.serverSelectionTimeoutMS != null && this.options.serverSelectionTimeoutMS > 0) + return Timeout.expires(this.options.serverSelectionTimeoutMS); + return null; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (this.options.waitQueueTimeoutMS != null && this.options.waitQueueTimeoutMS > 0) + return Timeout.expires(this.options.waitQueueTimeoutMS); + return null; + } +} diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index c1426d8db1d..c4989f58d7f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -33,16 +33,20 @@ describe('CSOT spec unit tests', function () { client = this.configuration.newClient({ timeoutMS: 1000 }); // Spy on connection checkout and pull options argument const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); - const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); const expiresSpy = sinon.spy(Timeout, 'expires'); await client.db('db').collection('collection').insertOne({ x: 1 }); expect(checkoutSpy).to.have.been.calledOnce; - expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + const timeoutContext = checkoutSpy.lastCall.args[0].timeoutContext; + expect(timeoutContext).to.exist; // Check that we passed through the timeout - expect(checkoutSpy.firstCall.args[0].timeout).to.equal( - selectServerSpy.lastCall.lastArg.timeout + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.be.instanceOf(Timeout); + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.equal( + // @ts-expect-error accessing private properties + timeoutContext._connectionCheckoutTimeout ); // Check that no more Timeouts are constructed after we enter checkout diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 5636eb00db7..17d85ba5b23 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -143,7 +143,7 @@ describe('CSOT driver tests', () => { }); it('throws a MongoOperationTimeoutError', { - metadata: { requires: { mongodb: '>=4.4' } }, + metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index f6d7e68bedc..9bb2abdb87a 100644 --- a/test/tools/cmap_spec_runner.ts +++ 
b/test/tools/cmap_spec_runner.ts @@ -12,7 +12,8 @@ import { makeClientMetadata, type MongoClient, type Server, - shuffle + shuffle, + TimeoutContext } from '../mongodb'; import { isAnyRequirementSatisfied } from './unified-spec-runner/unified-utils'; import { type FailPoint, sleep } from './utils'; @@ -185,7 +186,14 @@ const compareInputToSpec = (input, expected, message) => { const getTestOpDefinitions = (threadContext: ThreadContext) => ({ checkOut: async function (op) { - const connection: Connection = await ConnectionPool.prototype.checkOut.call(threadContext.pool); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: threadContext.pool.options.waitQueueTimeoutMS + }); + const connection: Connection = await ConnectionPool.prototype.checkOut.call( + threadContext.pool, + { timeoutContext } + ); if (op.label != null) { threadContext.connections.set(op.label, connection); } else { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 18048befab4..1604cd82d86 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -10,8 +10,10 @@ const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); const { MongoClientAuthProviders } = require('../../mongodb'); +const { TimeoutContext } = require('../../mongodb'); describe('Connection Pool', function () { + let timeoutContext; let mockMongod; const stubServer = { topology: { @@ -44,6 +46,10 @@ describe('Connection Pool', function () { }) ); + beforeEach(() => { + timeoutContext = TimeoutContext.create({ waitQueueTimeoutMS: 0, serverSelectionTimeoutMS: 0 }); + }); + it('should destroy connections which have been closed', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; @@ -64,8 +70,10 @@ describe('Connection Pool', function () { const events = []; pool.on('connectionClosed', event => events.push(event)); - const conn = await pool.checkOut(); - const error = await conn.command(ns('admin.$cmd'), { ping: 1 }, {}).catch(error => error); + const conn = await pool.checkOut({ timeoutContext }); + const error = await conn + .command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }) + .catch(error => error); expect(error).to.be.instanceOf(Error); pool.checkIn(conn); @@ -93,7 +101,7 @@ describe('Connection Pool', function () { pool.ready(); - const conn = await pool.checkOut(); + const conn = await pool.checkOut({ timeoutContext }); const maybeError = await conn.command(ns('admin.$cmd'), { ping: 1 }, undefined).catch(e => e); expect(maybeError).to.be.instanceOf(MongoError); expect(maybeError).to.match(/timed out/); @@ -114,11 +122,15 @@ describe('Connection Pool', function () { waitQueueTimeoutMS: 200, hostAddress: mockMongod.hostAddress() }); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 200, + serverSelectionTimeoutMS: 0 + }); pool.ready(); - const conn = await pool.checkOut(); - const err = await pool.checkOut().catch(e => e); + const conn = await pool.checkOut({ timeoutContext }); + const err = await pool.checkOut({ timeoutContext }).catch(e => e); expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); sinon.stub(pool, 'availableConnectionCount').get(() => 0); pool.checkIn(conn); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 6bab40d0318..bdc049cbc4f 100644 --- a/test/unit/error.test.ts +++ 
b/test/unit/error.test.ts @@ -28,6 +28,7 @@ import { ns, PoolClosedError as MongoPoolClosedError, setDifference, + TimeoutContext, type TopologyDescription, type TopologyOptions, WaitQueueTimeoutError as MongoWaitQueueTimeoutError @@ -376,11 +377,17 @@ describe('MongoErrors', () => { { replicaSet: 'rs' } as TopologyOptions ); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); return replSet .connect() - .then(topology => topology.selectServer('primary', {})) + .then(topology => topology.selectServer('primary', { timeoutContext })) .then(server => - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { + timeoutContext + }) ) .then( () => expect.fail('expected command to fail'), @@ -419,10 +426,14 @@ describe('MongoErrors', () => { if (err) { return cleanup(err); } + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - topology.selectServer('primary', {}).then(server => { + topology.selectServer('primary', { timeoutContext }).then(server => { server - .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { timeoutContext }) .then(expect.fail, err => { let _err; try { diff --git a/test/unit/operations/get_more.test.ts b/test/unit/operations/get_more.test.ts index f79da44e22f..17bc20f6fa7 100644 --- a/test/unit/operations/get_more.test.ts +++ b/test/unit/operations/get_more.test.ts @@ -69,7 +69,7 @@ describe('GetMoreOperation', function () { const call = stub.getCall(0); expect(call.args[0]).to.equal(namespace); expect(call.args[1]).to.deep.equal(expectedGetMoreCommand); - expect(call.args[2]).to.deep.equal(opts); + expect(call.args[2]).to.containSubset(opts); }); }); diff --git a/test/unit/sdam/topology.test.ts b/test/unit/sdam/topology.test.ts index e4a34417d50..5264b5d9c45 100644 --- a/test/unit/sdam/topology.test.ts +++ b/test/unit/sdam/topology.test.ts @@ -17,6 +17,7 @@ import { Server, SrvPoller, SrvPollingEvent, + TimeoutContext, Topology, TopologyDescription, TopologyDescriptionChangedEvent, @@ -108,17 +109,28 @@ describe('Topology (unit)', function () { const topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); topology.connect().then(() => { - topology.selectServer('primary', {}).then(server => { - server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }).then( - () => expect.fail('expected command to fail'), - err => { - expect(err).to.exist; - expect(err).to.match(/timed out/); - topology.close(); - done(); - } - ); - }, expect.fail); + const ctx = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0, + socketTimeoutMS: 250 + }); + topology + .selectServer('primary', { + timeoutContext: ctx + }) + .then(server => { + server + .command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250, timeoutContext: ctx }) + .then( + () => expect.fail('expected command to fail'), + err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + topology.close(); + done(); + } + ); + }, expect.fail); }, expect.fail); }); }); @@ -217,10 +229,16 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const timeoutContext = 
TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.true; }); @@ -245,11 +263,17 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.false; topology.close(); @@ -269,14 +293,20 @@ describe('Topology (unit)', function () { topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); await topology.connect(); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); const server = await topology.selectServer('primary', {}); let serverDescription; server.on('descriptionReceived', sd => (serverDescription = sd)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(server.description.type).to.equal('Unknown'); }); diff --git a/test/unit/timeout.test.ts b/test/unit/timeout.test.ts index 3fafc21b35f..119d0516a9c 100644 --- a/test/unit/timeout.test.ts +++ b/test/unit/timeout.test.ts @@ -1,6 +1,14 @@ import { expect } from 'chai'; -import { MongoInvalidArgumentError, Timeout, TimeoutError } from '../mongodb'; +import { + CSOTTimeoutContext, + LegacyTimeoutContext, + MongoInvalidArgumentError, + MongoRuntimeError, + Timeout, + TimeoutContext, + TimeoutError +} from '../mongodb'; describe('Timeout', function () { let timeout: Timeout; @@ -115,3 +123,197 @@ describe('Timeout', function () { }); }); }); + +describe('TimeoutContext', function () { + describe('TimeoutContext.create', function () { + context('when timeoutMS is a number', function () { + it('returns a CSOTTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(CSOTTimeoutContext); + }); + }); + + context('when timeoutMS is undefined', function () { + it('returns a LegacyTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(LegacyTimeoutContext); + }); + }); + }); + + describe('CSOTTimeoutContext', function () { + let ctx: CSOTTimeoutContext; + + describe('get serverSelectionTimeout()', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is 0', function () { + it('returns null', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); + + expect(ctx.serverSelectionTimeout).to.be.null; + }); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is >0', function () { + it('returns a Timeout instance 
with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + }); + + context( + 'when timeoutMS is >0 serverSelectionTimeoutMS is >0 and timeoutMS > serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 15, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + } + ); + + context( + 'when timeoutMS is >0, serverSelectionTimeoutMS is >0 and timeoutMS < serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to timeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 10, + serverSelectionTimeoutMS: 15 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.timeoutMS); + }); + } + ); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when called before get serverSelectionTimeout()', function () { + it('throws a MongoRuntimeError', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 15 + }); + + expect(() => ctx.connectionCheckoutTimeout).to.throw(MongoRuntimeError); + }); + }); + + context('when called after get serverSelectionTimeout()', function () { + let serverSelectionTimeout: Timeout; + let connectionCheckoutTimeout: Timeout; + + afterEach(() => { + serverSelectionTimeout.clear(); + connectionCheckoutTimeout.clear(); + }); + + it('returns same timeout as serverSelectionTimeout', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 86 + }); + serverSelectionTimeout = ctx.serverSelectionTimeout; + connectionCheckoutTimeout = ctx.connectionCheckoutTimeout; + + expect(connectionCheckoutTimeout).to.exist; + expect(connectionCheckoutTimeout).to.equal(serverSelectionTimeout); + }); + }); + }); + }); + + describe('LegacyTimeoutContext', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + describe('get serverSelectionTimeout()', function () { + context('when serverSelectionTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 100, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.serverSelectionTimeoutMS); + }); + }); + + context('when serverSelectionTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.null; + }); + }); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when waitQueueTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to waitQueueTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + 
waitQueueTimeoutMS: 20 + }); + timeout = ctx.connectionCheckoutTimeout; + + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.waitQueueTimeoutMS); + }); + }); + + context('when waitQueueTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + waitQueueTimeoutMS: 0 + }); + + expect(ctx.connectionCheckoutTimeout).to.be.null; + }); + }); + }); + }); +}); From 3051def34290bc4d64ba55fb8f54454ec0a4d86e Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 22 Jul 2024 11:17:22 -0400 Subject: [PATCH 034/136] refactor(NODE-6230): executeOperation to use iterative retry mechanism (#4157) --- src/cmap/connection_pool.ts | 6 ++++-- src/operations/execute_operation.ts | 27 ++++++++++++++++----------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5369cc155aa..2cd2bcc2c19 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -28,7 +28,7 @@ import { import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { type TimeoutContext, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -356,6 +356,7 @@ export class ConnectionPool extends TypedEventEmitter { * explicitly destroyed by the new owner. */ async checkOut(options: { timeoutContext: TimeoutContext }): Promise { + const checkoutTime = now(); this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) @@ -367,7 +368,8 @@ export class ConnectionPool extends TypedEventEmitter { const waitQueueMember: WaitQueueMember = { resolve, - reject + reject, + checkoutTime }; this[kWaitQueue].push(waitQueueMember); diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 39937c8abf4..efd92f19de3 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -25,7 +25,7 @@ import { import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; import { TimeoutContext } from '../timeout'; -import { squashError, supportsRetryableWrites } from '../utils'; +import { supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -87,12 +87,6 @@ export async function executeOperation< ); } - timeoutContext ??= TimeoutContext.create({ - serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, - waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, - timeoutMS: operation.options.timeoutMS - }); - const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -112,12 +106,18 @@ export async function executeOperation< session.unpin(); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + try { return await tryOperation(operation, { topology, + timeoutContext, session, - readPreference, - timeoutContext + readPreference }); } finally { if (session?.owner != null && session.owner === owner) { @@ -156,6 +156,7 @@ type RetryOptions = { session: ClientSession | undefined; readPreference: ReadPreference; topology: Topology; + timeoutContext: TimeoutContext; }; /** @@ -179,7 +180,10 @@ type RetryOptions = { async function tryOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(operation: T, { topology, session, readPreference }: RetryOptions): Promise { +>( + operation: T, + { topology, timeoutContext, session, readPreference }: RetryOptions +): Promise { let selector: ReadPreference | ServerSelector; if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) { @@ -197,7 +201,8 @@ async function tryOperation< let server = await topology.selectServer(selector, { session, - operationName: operation.commandName + operationName: operation.commandName, + timeoutContext }); const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION); From df025f4cfa07e15b5eb9efa43fa428520b6d8316 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 26 Jul 2024 09:55:20 -0400 Subject: [PATCH 035/136] feat(NODE-5682): set maxTimeMS on commands and preempt I/O (#4174) Co-authored-by: Warren James --- src/admin.ts | 5 +- src/cmap/connection.ts | 66 ++++++++++++++++--- src/cmap/wire_protocol/on_data.ts | 17 ++++- src/db.ts | 2 +- src/sdam/topology.ts | 17 +++-- src/timeout.ts | 43 ++++++++++-- ...ient_side_operations_timeout.prose.test.ts | 20 +++--- ...lient_side_operations_timeout.spec.test.ts | 33 +++++++++- .../node_csot.test.ts | 1 - test/integration/node-specific/db.test.js | 22 ++----- test/spec/{index.js => index.ts} | 19 ++---- test/tools/cmap_spec_runner.ts | 3 +- test/tools/unified-spec-runner/entities.ts | 4 +- test/tools/unified-spec-runner/match.ts | 15 ++++- test/tools/unified-spec-runner/operations.ts | 8 +-- test/unit/tools/unified_spec_runner.test.ts | 2 +- 16 files changed, 200 insertions(+), 77 deletions(-) rename test/spec/{index.js => index.ts} (67%) diff --git a/src/admin.ts b/src/admin.ts index e030384eafc..0f03023a95c 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -155,7 +155,10 @@ export class Admin { * @param options - Optional settings for the command */ async listDatabases(options?: ListDatabasesOptions): Promise { - return await executeOperation(this.s.db.client, new ListDatabasesOperation(this.s.db, options)); + return await executeOperation( + this.s.db.client, + new ListDatabasesOperation(this.s.db, { timeoutMS: this.s.db.timeoutMS, ...options }) + ); } /** diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index f7bb1789b7c..010bcb8c897 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -19,6 +19,7 @@ import { MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, MongoServerError, MongoUnexpectedServerResponseError @@ -30,7 +31,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from 
'../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type TimeoutContext } from '../timeout'; +import { type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -419,6 +420,11 @@ export class Connection extends TypedEventEmitter { ...options }; + if (options.timeoutContext?.csotEnabled()) { + const { maxTimeMS } = options.timeoutContext; + if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + } + const message = this.supportsOpMsg ? new OpMsgRequest(db, cmd, commandOptions) : new OpQueryRequest(db, cmd, commandOptions); @@ -433,7 +439,9 @@ export class Connection extends TypedEventEmitter { ): AsyncGenerator { this.throwIfAborted(); - if (typeof options.socketTimeoutMS === 'number') { + if (options.timeoutContext?.csotEnabled()) { + this.socket.setTimeout(0); + } else if (typeof options.socketTimeoutMS === 'number') { this.socket.setTimeout(options.socketTimeoutMS); } else if (this.socketTimeoutMS !== 0) { this.socket.setTimeout(this.socketTimeoutMS); @@ -442,7 +450,8 @@ export class Connection extends TypedEventEmitter { try { await this.writeCommand(message, { agreedCompressor: this.description.compressor ?? 'none', - zlibCompressionLevel: this.description.zlibCompressionLevel + zlibCompressionLevel: this.description.zlibCompressionLevel, + timeoutContext: options.timeoutContext }); if (options.noResponse || message.moreToCome) { @@ -452,7 +461,17 @@ export class Connection extends TypedEventEmitter { this.throwIfAborted(); - for await (const response of this.readMany()) { + if ( + options.timeoutContext?.csotEnabled() && + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + + for await (const response of this.readMany({ timeoutContext: options.timeoutContext })) { this.socket.setTimeout(0); const bson = response.parse(); @@ -629,7 +648,11 @@ export class Connection extends TypedEventEmitter { */ private async writeCommand( command: WriteProtocolMessageType, - options: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number } + options: { + agreedCompressor?: CompressorName; + zlibCompressionLevel?: number; + timeoutContext?: TimeoutContext; + } ): Promise { const finalCommand = options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command) @@ -641,8 +664,32 @@ export class Connection extends TypedEventEmitter { const buffer = Buffer.concat(await finalCommand.toBin()); + if (options.timeoutContext?.csotEnabled()) { + if ( + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + } + if (this.socket.write(buffer)) return; - return await once(this.socket, 'drain'); + + const drainEvent = once(this.socket, 'drain'); + const timeout = options?.timeoutContext?.timeoutForSocketWrite; + if (timeout) { + try { + return await Promise.race([drainEvent, timeout]); + } catch (error) { + if (TimeoutError.is(error)) { + throw new MongoOperationTimeoutError('Timed out at socket write'); + } + throw error; + } + } + return await drainEvent; } /** @@ -654,10 +701,13 @@ export class Connection extends 
TypedEventEmitter { * * Note that `for-await` loops call `return` automatically when the loop is exited. */ - private async *readMany(): AsyncGenerator { + private async *readMany(options: { + timeoutContext?: TimeoutContext; + }): AsyncGenerator { try { - this.dataEvents = onData(this.messageStream); + this.dataEvents = onData(this.messageStream, options); this.messageStream.resume(); + for await (const message of this.dataEvents) { const response = await decompressResponse(message); yield response; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index b99c950d96f..a32c6b1b484 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,5 +1,7 @@ import { type EventEmitter } from 'events'; +import { MongoOperationTimeoutError } from '../../error'; +import { type TimeoutContext, TimeoutError } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -18,7 +20,10 @@ type PendingPromises = Omit< * Returns an AsyncIterator that iterates each 'data' event emitted from emitter. * It will reject upon an error event. */ -export function onData(emitter: EventEmitter) { +export function onData( + emitter: EventEmitter, + { timeoutContext }: { timeoutContext?: TimeoutContext } +) { // Setup pending events and pending promise lists /** * When the caller has not yet called .next(), we store the @@ -86,6 +91,8 @@ export function onData(emitter: EventEmitter) { // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + // eslint-disable-next-line github/no-then + timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -97,8 +104,12 @@ export function onData(emitter: EventEmitter) { function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - if (promise != null) promise.reject(err); - else error = err; + const timeoutError = TimeoutError.is(err) + ? new MongoOperationTimeoutError('Timed out during socket read') + : undefined; + + if (promise != null) promise.reject(timeoutError ?? err); + else error = timeoutError ?? err; void closeHandler(); } diff --git a/src/db.ts b/src/db.ts index 6e1aa194acf..48501bc497e 100644 --- a/src/db.ts +++ b/src/db.ts @@ -277,7 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS, + timeoutMS: options?.timeoutMS ?? this.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 6117b5317cd..479003f0e35 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -460,29 +460,28 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.s.options.timeoutMS; + // TODO(NODE-6223): auto connect cannot use timeoutMS + // const timeoutMS = this.client.s.options.timeoutMS; const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const timeoutContext = TimeoutContext.create({ - timeoutMS, + timeoutMS: undefined, serverSelectionTimeoutMS, waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS }); - const selectServerOptions = { operationName: 'ping', ...options, timeoutContext }; + try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), selectServerOptions ); - const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; - if (!skipPingOnConnect && server && this.s.credentials) { + if (!skipPingOnConnect && this.s.credentials) { await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); @@ -623,7 +622,11 @@ export class Topology extends TypedEventEmitter { try { timeout?.throwIfExpired(); - return await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + const server = await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + if (options.timeoutContext?.csotEnabled() && server.description.minRoundTripTime !== 0) { + options.timeoutContext.minRoundTripTime = server.description.minRoundTripTime; + } + return server; } catch (error) { if (TimeoutError.is(error)) { // Timeout diff --git a/src/timeout.ts b/src/timeout.ts index 3d65992a02b..cc90b8c2e72 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,6 +1,6 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; import { csotMin, noop } from './utils'; /** @internal */ @@ -51,7 +51,7 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = false) { + private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; if (duration < 0) { @@ -163,6 +163,10 @@ export abstract class TimeoutContext { abstract get clearConnectionCheckoutTimeout(): boolean; + abstract get timeoutForSocketWrite(): Timeout | null; + + abstract get timeoutForSocketRead(): Timeout | null; + abstract csotEnabled(): this is CSOTTimeoutContext; } @@ -175,13 +179,15 @@ export class CSOTTimeoutContext extends TimeoutContext { clearConnectionCheckoutTimeout: boolean; clearServerSelectionTimeout: boolean; - private _maxTimeMS?: number; - private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; + public minRoundTripTime = 0; + private start: number; constructor(options: CSOTTimeoutContextOptions) { super(); + this.start = Math.trunc(performance.now()); + this.timeoutMS = options.timeoutMS; this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; @@ -193,11 +199,12 @@ export class CSOTTimeoutContext extends TimeoutContext { } get maxTimeMS(): number { - return this._maxTimeMS ?? -1; + return this.remainingTimeMS - this.minRoundTripTime; } - set maxTimeMS(v: number) { - this._maxTimeMS = v; + get remainingTimeMS() { + const timePassed = Math.trunc(performance.now()) - this.start; + return this.timeoutMS <= 0 ? 
Infinity : this.timeoutMS - timePassed; } csotEnabled(): this is CSOTTimeoutContext { @@ -238,6 +245,20 @@ export class CSOTTimeoutContext extends TimeoutContext { } return this._connectionCheckoutTimeout; } + + get timeoutForSocketWrite(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket write'); + } + + get timeoutForSocketRead(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket read'); + } } /** @internal */ @@ -268,4 +289,12 @@ export class LegacyTimeoutContext extends TimeoutContext { return Timeout.expires(this.options.waitQueueTimeoutMS); return null; } + + get timeoutForSocketWrite(): Timeout | null { + return null; + } + + get timeoutForSocketRead(): Timeout | null { + return null; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 903ea9c3bb4..729bed42199 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -384,7 +384,7 @@ describe('CSOT spec prose tests', function () { clock.restore(); }); - it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { + it.skip('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. @@ -416,10 +416,11 @@ describe('CSOT spec prose tests', function () { await clock.tickAsync(11); expect(await maybeError).to.be.instanceof(MongoServerSelectionError); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); - it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -440,9 +441,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. 
@@ -462,9 +464,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { + it.skip('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -484,7 +487,8 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 2e2cd0fa8e5..f73f162204f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -3,7 +3,34 @@ import { join } from 'path'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -// TODO(NODE-5823): Implement unified runner operations and options support for CSOT -describe.skip('CSOT spec tests', function () { - runUnifiedSuite(loadSpecTests(join('client-side-operations-timeout'))); +const enabled = [ + 'override-collection-timeoutMS', + 'override-database-timeoutMS', + 'override-operation-timeoutMS' +]; + +const cursorOperations = [ + 'aggregate', + 'countDocuments', + 'listIndexes', + 'createChangeStream', + 'listCollections', + 'listCollectionNames' +]; + +describe('CSOT spec tests', function () { + const specs = loadSpecTests(join('client-side-operations-timeout')); + for (const spec of specs) { + for (const test of spec.tests) { + // not one of the test suites listed in kickoff + if (!enabled.includes(spec.name)) { + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + + // Cursor operation + if (test.operations.find(operation => cursorOperations.includes(operation.name))) + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + } + runUnifiedSuite(specs); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 17d85ba5b23..0c97b910836 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -48,7 +48,6 @@ describe('CSOT driver tests', () => { afterEach(async () => { await cursor?.close(); await session?.endSession(); - await session.endSession(); }); it('throws an error', async () => { diff --git a/test/integration/node-specific/db.test.js b/test/integration/node-specific/db.test.js index 338e136c12c..a092a8d888b 100644 --- a/test/integration/node-specific/db.test.js +++ b/test/integration/node-specific/db.test.js @@ -45,22 +45,12 @@ 
describe('Db', function () { }); }); - it('shouldCorrectlyHandleFailedConnection', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var configuration = this.configuration; - var fs_client = configuration.newClient('mongodb://127.0.0.1:25117/test', { - serverSelectionTimeoutMS: 10 - }); - - fs_client.connect(function (err) { - test.ok(err != null); - done(); - }); - } + it('should correctly handle failed connection', async function () { + const client = this.configuration.newClient('mongodb://iLoveJS', { + serverSelectionTimeoutMS: 10 + }); + const error = await client.connect().catch(error => error); + expect(error).to.be.instanceOf(Error); }); it('shouldCorrectlyGetErrorDroppingNonExistingDb', { diff --git a/test/spec/index.js b/test/spec/index.ts similarity index 67% rename from test/spec/index.js rename to test/spec/index.ts index f9e6dccf02f..221d6671893 100644 --- a/test/spec/index.js +++ b/test/spec/index.ts @@ -1,7 +1,7 @@ -'use strict'; -const path = require('path'); -const fs = require('fs'); -const { EJSON } = require('bson'); +import * as fs from 'fs'; +import * as path from 'path'; + +import { EJSON } from '../mongodb'; function hasDuplicates(testArray) { const testNames = testArray.map(test => test.description); @@ -12,17 +12,16 @@ function hasDuplicates(testArray) { /** * Given spec test folder names, loads the corresponding JSON * - * @param {...string} args - the spec test name to load - * @returns {any[]} + * @param args - the spec test name to load */ -function loadSpecTests(...args) { +export function loadSpecTests(...args: string[]): any[] { const specPath = path.resolve(...[__dirname].concat(args)); const suites = fs .readdirSync(specPath) .filter(x => x.includes('.json')) .map(x => ({ - ...EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), + ...EJSON.parse(fs.readFileSync(path.join(specPath, x), 'utf8'), { relaxed: true }), name: path.basename(x, '.json') })); @@ -36,7 +35,3 @@ function loadSpecTests(...args) { return suites; } - -module.exports = { - loadSpecTests -}; diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index 9bb2abdb87a..892f6311df5 100644 --- a/test/tools/cmap_spec_runner.ts +++ b/test/tools/cmap_spec_runner.ts @@ -1,6 +1,7 @@ import { expect } from 'chai'; import { EventEmitter } from 'events'; import { clearTimeout, setTimeout } from 'timers'; +import { inspect } from 'util'; import { addContainerMetadata, @@ -427,7 +428,7 @@ async function runCmapTest(test: CmapTest, threadContext: ThreadContext) { } compareInputToSpec(actualError, errorPropsToCheck, `failed while checking ${errorType}`); } else { - expect(actualError).to.not.exist; + expect(actualError, inspect(actualError)).to.not.exist; } const actualEvents = threadContext.poolEvents.filter( diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 65b5242cf06..9f4e20a828e 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -44,7 +44,7 @@ import { type TopologyOpeningEvent, WriteConcern } from '../../mongodb'; -import { ejson, getEnvironmentalOptions } from '../../tools/utils'; +import { getEnvironmentalOptions } from '../../tools/utils'; import type { TestConfiguration } from '../runner/config'; import { EntityEventRegistry } from './entity_event_registry'; import { trace } from './runner'; @@ -590,7 +590,7 @@ export class EntitiesMap extends Map { new EntityEventRegistry(client, 
entity.client, map).register(); await client.connect(); } catch (error) { - console.error(ejson`failed to connect entity ${entity}`); + console.error('failed to connect entity', entity); // In the case where multiple clients are defined in the test and any one of them failed // to connect, but others did succeed, we need to ensure all open clients are closed. const clients = map.mapOf('client'); diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 7b2668e88a0..3e3ba86d0e6 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -173,7 +173,8 @@ TYPE_MAP.set('minKey', actual => actual._bsontype === 'MinKey'); TYPE_MAP.set('maxKey', actual => actual._bsontype === 'MaxKey'); TYPE_MAP.set( 'int', - actual => (typeof actual === 'number' && Number.isInteger(actual)) || actual._bsontype === 'Int32' + actual => + (typeof actual === 'number' && Number.isInteger(actual)) || actual?._bsontype === 'Int32' ); TYPE_MAP.set( 'long', @@ -218,6 +219,10 @@ export function resultCheck( resultCheck(objFromActual, value, entities, path, checkExtraKeys); } else if (key === 'createIndexes') { for (const [i, userIndex] of actual.indexes.entries()) { + if (expected?.indexes?.[i]?.key == null) { + // The expectation does not include an assertion for the index key + continue; + } expect(expected).to.have.nested.property(`.indexes[${i}].key`).to.be.a('object'); // @ts-expect-error: Not worth narrowing to a document expect(Object.keys(expected.indexes[i].key)).to.have.lengthOf(1); @@ -371,7 +376,7 @@ export function specialCheck( for (const type of types) { ok ||= TYPE_MAP.get(type)(actual); } - expect(ok, `Expected [${actual}] to be one of [${types}]`).to.be.true; + expect(ok, `Expected ${path.join('.')} [${actual}] to be one of [${types}]`).to.be.true; } else if (isExistsOperator(expected)) { // $$exists const actualExists = actual !== undefined && actual !== null; @@ -784,6 +789,12 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 9cc67174f3c..7a98c7ac978 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -303,6 +303,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { if (!/ns not found/.test(err.message)) { throw err; } + return false; } }); @@ -313,7 +314,7 @@ operations.set('drop', async ({ entities, operation }) => { operations.set('dropIndexes', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - return collection.dropIndexes(); + return collection.dropIndexes(operation.arguments); }); operations.set('endSession', async ({ entities, operation }) => { @@ -767,11 +768,10 @@ operations.set('runCommand', async ({ entities, operation }: OperationFunctionPa throw new AssertionError('runCommand requires a command'); const { command } = operation.arguments; - if (operation.arguments.timeoutMS != null) throw new AssertionError('timeoutMS not supported'); - 
const options = { readPreference: operation.arguments.readPreference, - session: operation.arguments.session + session: operation.arguments.session, + timeoutMS: operation.arguments.timeoutMS }; return db.command(command, options); diff --git a/test/unit/tools/unified_spec_runner.test.ts b/test/unit/tools/unified_spec_runner.test.ts index a0887be9593..7ebee168590 100644 --- a/test/unit/tools/unified_spec_runner.test.ts +++ b/test/unit/tools/unified_spec_runner.test.ts @@ -100,7 +100,7 @@ describe('Unified Spec Runner', function () { expect(() => resultCheckSpy(actual, expected, entitiesMap, [])).to.throw( AssertionError, - /Expected \[string\] to be one of \[int\]/ + /\[string\] to be one of \[int\]/ ); }); }); From 83cd82b8d1c230f8bd016ae40a5c2ce1d5206e75 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 1 Aug 2024 16:08:39 -0400 Subject: [PATCH 036/136] feat(NODE-6231): Add CSOT behaviour for retryable reads and writes (#4186) --- src/operations/execute_operation.ts | 9 ++++--- src/timeout.ts | 26 ++++++++++++------- ...lient_side_operations_timeout.spec.test.ts | 13 +++++++++- ...lient_side_operations_timeout.unit.test.ts | 10 +++++-- .../node_csot.test.ts | 5 ---- test/tools/unified-spec-runner/match.ts | 2 ++ 6 files changed, 44 insertions(+), 21 deletions(-) diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index efd92f19de3..c9135fa1c32 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -227,12 +227,10 @@ async function tryOperation< session.incrementTransactionNumber(); } - // TODO(NODE-6231): implement infinite retry within CSOT timeout here - const maxTries = willRetry ? 2 : 1; + const maxTries = willRetry ? (timeoutContext.csotEnabled() ? Infinity : 2) : 1; let previousOperationError: MongoError | undefined; let previousServer: ServerDescription | undefined; - // TODO(NODE-6231): implement infinite retry within CSOT timeout here for (let tries = 0; tries < maxTries; tries++) { if (previousOperationError) { if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) { @@ -276,7 +274,6 @@ async function tryOperation< return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; - if ( previousOperationError != null && operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed) @@ -285,6 +282,10 @@ async function tryOperation< } previousServer = server.description; previousOperationError = operationError; + + // Reset timeouts + timeoutContext.serverSelectionTimeout?.clear(); + timeoutContext.connectionCheckoutTimeout?.clear(); } } diff --git a/src/timeout.ts b/src/timeout.ts index cc90b8c2e72..297a484b4ec 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -39,6 +39,7 @@ export class Timeout extends Promise { public ended: number | null = null; public duration: number; public timedOut = false; + public cleared = false; get remainingTime(): number { if (this.timedOut) return 0; @@ -53,7 +54,6 @@ export class Timeout extends Promise { /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; - if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } @@ -86,6 +86,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.cleared = true; } throwIfExpired(): void { @@ 
-213,16 +214,20 @@ export class CSOTTimeoutContext extends TimeoutContext { get serverSelectionTimeout(): Timeout | null { // check for undefined - if (typeof this._serverSelectionTimeout !== 'object') { + if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { + const { remainingTimeMS, serverSelectionTimeoutMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError( + `Timed out in server selection after ${this.timeoutMS}ms` + ); const usingServerSelectionTimeoutMS = - this.serverSelectionTimeoutMS !== 0 && - csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; - + serverSelectionTimeoutMS !== 0 && + csotMin(remainingTimeMS, serverSelectionTimeoutMS) === serverSelectionTimeoutMS; if (usingServerSelectionTimeoutMS) { - this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + this._serverSelectionTimeout = Timeout.expires(serverSelectionTimeoutMS); } else { - if (this.timeoutMS > 0) { - this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + if (remainingTimeMS > 0 && Number.isFinite(remainingTimeMS)) { + this._serverSelectionTimeout = Timeout.expires(remainingTimeMS); } else { this._serverSelectionTimeout = null; } @@ -233,7 +238,10 @@ export class CSOTTimeoutContext extends TimeoutContext { } get connectionCheckoutTimeout(): Timeout | null { - if (typeof this._connectionCheckoutTimeout !== 'object') { + if ( + typeof this._connectionCheckoutTimeout !== 'object' || + this._connectionCheckoutTimeout?.cleared + ) { if (typeof this._serverSelectionTimeout === 'object') { // null or Timeout this._connectionCheckoutTimeout = this._serverSelectionTimeout; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index f73f162204f..e4c9eb3027c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -6,7 +6,9 @@ import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const enabled = [ 'override-collection-timeoutMS', 'override-database-timeoutMS', - 'override-operation-timeoutMS' + 'override-operation-timeoutMS', + 'retryability-legacy-timeouts', + 'retryability-timeoutMS' ]; const cursorOperations = [ @@ -18,6 +20,11 @@ const cursorOperations = [ 'listCollectionNames' ]; +const bulkWriteOperations = [ + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' +]; + describe('CSOT spec tests', function () { const specs = loadSpecTests(join('client-side-operations-timeout')); for (const spec of specs) { @@ -30,6 +37,10 @@ describe('CSOT spec tests', function () { // Cursor operation if (test.operations.find(operation => cursorOperations.includes(operation.name))) test.skipReason = 'TODO(NODE-5684): Not working yet'; + + if (bulkWriteOperations.includes(test.description)) + test.skipReason = + 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } runUnifiedSuite(specs); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 
c4989f58d7f..944d9b96048 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -7,7 +7,7 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; +import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -22,10 +22,16 @@ describe('CSOT spec unit tests', function () { it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); sinon.spy(Timeout, 'expires'); + const timeoutContextSpy = sinon.spy(TimeoutContext, 'create'); await client.db('db').collection('collection').insertOne({ x: 1 }); - expect(Timeout.expires).to.have.been.calledWith(10000); + const createCalls = timeoutContextSpy.getCalls().filter( + // @ts-expect-error accessing concrete field + call => call.args[0].timeoutMS === 10000 + ); + + expect(createCalls).to.have.length.greaterThanOrEqual(1); expect(Timeout.expires).to.not.have.been.calledWith(999999); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 0c97b910836..63e2d97dd90 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,6 +1,5 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; -import * as sinon from 'sinon'; import { type ClientSession, @@ -13,10 +12,6 @@ import { } from '../../mongodb'; describe('CSOT driver tests', () => { - afterEach(() => { - sinon.restore(); - }); - describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 3e3ba86d0e6..90996b9640e 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -789,6 +789,8 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their + // errorResponse field if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { From c36dce501f9d4e78817459e4d445a8bb966f8531 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 12 Aug 2024 16:46:06 -0400 Subject: [PATCH 037/136] feat(NODE-6312): add error transformation for server timeouts (#4192) --- src/cmap/connection.ts | 29 ++++ src/cmap/wire_protocol/responses.ts | 36 +++- .../node_csot.test.ts | 163 +++++++++++++++++- 3 files changed, 225 insertions(+), 3 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 010bcb8c897..ecc5ca9c0c7 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -16,6 +16,7 @@ import { } from '../constants'; import { MongoCompatibilityError, + MONGODB_ERROR_CODES, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, @@ -540,6 +541,11 @@ export class Connection extends TypedEventEmitter { } if (document.ok === 0) { + if 
(options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError((object ??= document.toObject(bsonOptions))) + }); + } throw new MongoServerError((object ??= document.toObject(bsonOptions))); } @@ -613,6 +619,29 @@ export class Connection extends TypedEventEmitter { ): Promise { this.throwIfAborted(); for await (const document of this.sendCommand(ns, command, options, responseType)) { + if (options.timeoutContext?.csotEnabled()) { + if (MongoDBResponse.is(document)) { + // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT + if (document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document.toObject()) + }); + } + } else { + if ( + (Array.isArray(document?.writeErrors) && + document.writeErrors.some( + error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + )) || + document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + ) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document) + }); + } + } + } + return document; } throw new MongoUnexpectedServerResponseError('Unable to get response from server'); diff --git a/src/cmap/wire_protocol/responses.ts b/src/cmap/wire_protocol/responses.ts index 6c166afd61e..12b68784272 100644 --- a/src/cmap/wire_protocol/responses.ts +++ b/src/cmap/wire_protocol/responses.ts @@ -11,7 +11,7 @@ import { pluckBSONSerializeOptions, type Timestamp } from '../../bson'; -import { MongoUnexpectedServerResponseError } from '../../error'; +import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error'; import { type ClusterTime } from '../../sdam/common'; import { decorateDecryptionResult, ns } from '../../utils'; import { @@ -111,6 +111,40 @@ export class MongoDBResponse extends OnDemandDocument { // {ok:1} static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0])); + /** + * Returns true iff: + * - ok is 0 and the top-level code === 50 + * - ok is 1 and the writeErrors array contains a code === 50 + * - ok is 1 and the writeConcern object contains a code === 50 + */ + get isMaxTimeExpiredError() { + // {ok: 0, code: 50 ... } + const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isTopLevel) return true; + + if (this.ok === 0) return false; + + // {ok: 1, writeConcernError: {code: 50 ... }} + const isWriteConcern = + this.get('writeConcernError', BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isWriteConcern) return true; + + const writeErrors = this.get('writeErrors', BSONType.array); + if (writeErrors?.size()) { + for (let i = 0; i < writeErrors.size(); i++) { + const isWriteError = + writeErrors.get(i, BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + + // {ok: 1, writeErrors: [{code: 50 ... }]} + if (isWriteError) return true; + } + } + + return false; + } + /** * Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the * contents of the document. 
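
Reviewer annotation (not part of the patch): the doc comment above spells out the three reply shapes that the new isMaxTimeExpiredError getter treats as a server-side maxTimeMS timeout (top-level code 50, a code-50 entry in writeErrors, or a code-50 writeConcernError). The sketch below restates those rules as a standalone predicate over plain reply objects; it is illustrative only — MAX_TIME_MS_EXPIRED, ReplyLike, and isMaxTimeExpired are hypothetical names, and the real getter operates on OnDemandDocument rather than plain objects.

    // Hypothetical standalone predicate mirroring the three cases described above.
    const MAX_TIME_MS_EXPIRED = 50; // MONGODB_ERROR_CODES.MaxTimeMSExpired

    interface ReplyLike {
      ok: number;
      code?: number;
      writeConcernError?: { code?: number };
      writeErrors?: Array<{ code?: number }>;
    }

    function isMaxTimeExpired(reply: ReplyLike): boolean {
      // {ok: 0, code: 50, ...} -- top-level command failure
      if (reply.ok === 0) return reply.code === MAX_TIME_MS_EXPIRED;

      // {ok: 1, writeConcernError: {code: 50, ...}}
      if (reply.writeConcernError?.code === MAX_TIME_MS_EXPIRED) return true;

      // {ok: 1, writeErrors: [..., {code: 50, ...}, ...]}
      return reply.writeErrors?.some(e => e.code === MAX_TIME_MS_EXPIRED) ?? false;
    }

    // Example: a write that exceeded maxTimeMS on the server
    isMaxTimeExpired({ ok: 1, writeErrors: [{ code: 50 }] }); // true
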
diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 63e2d97dd90..d7d4a4ede5a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,17 +1,23 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; +import * as semver from 'semver'; +import * as sinon from 'sinon'; import { + BSON, type ClientSession, type Collection, + Connection, type Db, type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, - MongoOperationTimeoutError + MongoOperationTimeoutError, + MongoServerError } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', () => { +describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -161,4 +167,157 @@ describe('CSOT driver tests', () => { }); }); }); + + describe('server-side maxTimeMS errors are transformed', () => { + let client: MongoClient; + let commandsSucceeded; + let commandsFailed; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); + commandsSucceeded = []; + commandsFailed = []; + client.on('commandSucceeded', event => { + if (event.commandName === 'configureFailPoint') return; + commandsSucceeded.push(event); + }); + client.on('commandFailed', event => commandsFailed.push(event)); + }); + + afterEach(async function () { + await client + .db() + .collection('a') + .drop() + .catch(() => null); + await client.close(); + commandsSucceeded = undefined; + commandsFailed = undefined; + }); + + describe('when a maxTimeExpired error is returned at the top-level', () => { + // {ok: 0, code: 50, codeName: "MaxTimeMSExpired", errmsg: "operation time limit exceeded"} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['ping'], + errorCode: 50 + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command failed', async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { + // The server should always return one maxTimeExpiredError at the front of the writeErrors array + // But for the sake of defensive programming we will find any maxTime error in the array. 
+ + beforeEach(async () => { + const writeErrorsReply = BSON.serialize({ + ok: 1, + writeErrors: [ + { code: 2, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 3, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 4, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 50, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' } + ] + }); + const commandSpy = sinon.spy(Connection.prototype, 'command'); + const readManyStub = sinon + // @ts-expect-error: readMany is private + .stub(Connection.prototype, 'readMany') + .callsFake(async function* (...args) { + const realIterator = readManyStub.wrappedMethod.call(this, ...args); + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + }); + }); + + afterEach(() => sinon.restore()); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { + // {ok: 1, writeConcernError: {code: 50, codeName: "MaxTimeMSExpired"}} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + writeConcernError: { code: 50, errmsg: 'times up buster', errorLabels: [] } + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + }); + }); + }); }); From 3fe3e01422b15fed4abfb3d957567161bca09bc0 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 9 Sep 2024 11:11:11 -0400 Subject: [PATCH 038/136] feat(NODE-6313): add CSOT support to sessions and transactions (#4199) --- package-lock.json | 82 +++--- package.json | 2 +- src/cmap/connection.ts | 7 + src/cmap/wire_protocol/on_data.ts | 15 +- src/collection.ts | 12 +- src/db.ts | 22 +- src/error.ts | 3 + src/operations/execute_operation.ts | 8 +- src/sessions.ts | 255 ++++++++++++------ src/timeout.ts | 49 +++- src/transactions.ts | 7 +- src/utils.ts | 13 +- 
...ient_side_operations_timeout.prose.test.ts | 167 +++++++++++- ...lient_side_operations_timeout.spec.test.ts | 18 +- .../node_csot.test.ts | 150 +++++++++++ .../sessions-inherit-timeoutMS.json | 28 +- .../sessions-inherit-timeoutMS.yml | 19 +- ...sessions-override-operation-timeoutMS.json | 32 ++- .../sessions-override-operation-timeoutMS.yml | 23 +- .../sessions-override-timeoutMS.json | 28 +- .../sessions-override-timeoutMS.yml | 19 +- test/tools/unified-spec-runner/entities.ts | 4 + test/tools/unified-spec-runner/match.ts | 19 +- test/tools/unified-spec-runner/operations.ts | 27 +- 24 files changed, 776 insertions(+), 233 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2b07cd361d5..1d9cebf509b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6415,10 +6415,46 @@ "node": ">=10" } }, - "node_modules/mongodb": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.8.0.tgz", - "integrity": "sha512-HGQ9NWDle5WvwMnrvUxsFYPd3JEbqD3RgABHBQRuoCEND0qzhsd0iH5ypHsf1eJ+sXmvmyKpP+FLOKY8Il7jMw==", + "node_modules/mongodb-client-encryption": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", + "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "bindings": "^1.5.0", + "node-addon-api": "^4.3.0", + "prebuild-install": "^7.1.2" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-connection-string-url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", + "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", + "dependencies": { + "@types/whatwg-url": "^11.0.2", + "whatwg-url": "^13.0.0" + } + }, + "node_modules/mongodb-legacy": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", + "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "dev": true, + "dependencies": { + "mongodb": "^6.0.0" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-legacy/node_modules/mongodb": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.7.0.tgz", + "integrity": "sha512-TMKyHdtMcO0fYBNORiYdmM25ijsHs+Njs963r4Tro4OQZzqYigAzYQouwWRg4OIaiLRUEGUh/1UAcH5lxdSLIA==", "dev": true, "dependencies": { "@mongodb-js/saslprep": "^1.1.5", @@ -6461,42 +6497,6 @@ } } }, - "node_modules/mongodb-client-encryption": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", - "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "node-addon-api": "^4.3.0", - "prebuild-install": "^7.1.2" - }, - "engines": { - "node": ">=16.20.1" - } - }, - "node_modules/mongodb-connection-string-url": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", - "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", - "dependencies": { - "@types/whatwg-url": "^11.0.2", - "whatwg-url": "^13.0.0" - } - }, - "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", - "dev": true, - "dependencies": { - "mongodb": "^6.0.0" - }, - "engines": { - "node": ">=16.20.1" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", diff --git a/package.json b/package.json index 479356905dc..2de0e1811f0 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index ecc5ca9c0c7..7ad367e6733 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -745,6 +745,13 @@ export class Connection extends TypedEventEmitter { return; } } + } catch (readError) { + if (TimeoutError.is(readError)) { + throw new MongoOperationTimeoutError( + `Timed out during socket read (${readError.duration}ms)` + ); + } + throw readError; } finally { this.dataEvents = null; this.messageStream.pause(); diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index a32c6b1b484..23fd88e2828 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,7 +1,6 @@ import { type EventEmitter } from 'events'; -import { MongoOperationTimeoutError } from '../../error'; -import { type TimeoutContext, TimeoutError } from '../../timeout'; +import { type TimeoutContext } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -91,8 +90,11 @@ export function onData( // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + + const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; + timeoutForSocketRead?.throwIfExpired(); // eslint-disable-next-line github/no-then - timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); + timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -104,12 +106,9 @@ export function onData( function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - const timeoutError = TimeoutError.is(err) - ? new MongoOperationTimeoutError('Timed out during socket read') - : undefined; - if (promise != null) promise.reject(timeoutError ?? err); - else error = timeoutError ?? err; + if (promise != null) promise.reject(err); + else error = err; void closeHandler(); } diff --git a/src/collection.ts b/src/collection.ts index dbd91371cce..f3a206b0c7b 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -470,10 +470,14 @@ export class Collection { // Intentionally, we do not inherit options from parent for this operation. 
return await executeOperation( this.client, - new RenameOperation(this as TODO_NODE_3286, newName, { - ...options, - readPreference: ReadPreference.PRIMARY - }) as TODO_NODE_3286 + new RenameOperation( + this as TODO_NODE_3286, + newName, + resolveOptions(undefined, { + ...options, + readPreference: ReadPreference.PRIMARY + }) + ) as TODO_NODE_3286 ); } diff --git a/src/db.ts b/src/db.ts index 48501bc497e..bd0b5450b8c 100644 --- a/src/db.ts +++ b/src/db.ts @@ -275,12 +275,16 @@ export class Db { // Intentionally, we do not inherit options from parent for this operation. return await executeOperation( this.client, - new RunCommandOperation(this, command, { - ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS ?? this.timeoutMS, - session: options?.session, - readPreference: options?.readPreference - }) + new RunCommandOperation( + this, + command, + resolveOptions(undefined, { + ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS ?? this.timeoutMS, + session: options?.session, + readPreference: options?.readPreference + }) + ) ); } @@ -385,7 +389,11 @@ export class Db { new RenameOperation( this.collection(fromCollection) as TODO_NODE_3286, toCollection, - { ...options, new_collection: true, readPreference: ReadPreference.primary } + resolveOptions(undefined, { + ...options, + new_collection: true, + readPreference: ReadPreference.primary + }) ) as TODO_NODE_3286 ); } diff --git a/src/error.ts b/src/error.ts index bd78e8883b2..c99083a937a 100644 --- a/src/error.ts +++ b/src/error.ts @@ -124,6 +124,9 @@ function isAggregateError(e: unknown): e is Error & { errors: Error[] } { * mongodb-client-encryption has a dependency on this error, it uses the constructor with a string argument */ export class MongoError extends Error { + get [Symbol.toStringTag]() { + return this.name; + } /** @internal */ [kErrorLabels]: Set; /** diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index c9135fa1c32..f9d9f9b63b4 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -58,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -81,11 +81,6 @@ export async function executeOperation< } else if (session.client !== client) { throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient'); } - if (session.explicit && session?.timeoutMS != null && operation.options.timeoutMS != null) { - throw new MongoInvalidArgumentError( - 'Do not specify timeoutMS on operation if already specified on an explicit session' - ); - } const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -107,6 +102,7 @@ export async function executeOperation< } timeoutContext ??= TimeoutContext.create({ + session, serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, timeoutMS: operation.options.timeoutMS diff --git a/src/sessions.ts b/src/sessions.ts index bad966ed71c..bbd1785275f 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -29,6 +29,7 @@ import { ReadConcernLevel } from './read_concern'; import { ReadPreference } from './read_preference'; import { type AsyncDisposable, configureResourceManagement } from './resource_management'; import { _advanceClusterTime, type ClusterTime, TopologyType } from './sdam/common'; +import { TimeoutContext } from './timeout'; import { isTransactionCommand, Transaction, @@ -58,8 +59,11 @@ export interface ClientSessionOptions { snapshot?: boolean; /** The default TransactionOptions to use for transactions started on this session. */ defaultTransactionOptions?: TransactionOptions; - /** @internal - * The value of timeoutMS used for CSOT. Used to override client timeoutMS */ + /** + * @public + * An overriding timeoutMS value to use for a client-side timeout. + * If not provided the session uses the timeoutMS specified on the MongoClient. + */ defaultTimeoutMS?: number; /** @internal */ @@ -98,6 +102,9 @@ export interface EndSessionOptions { error?: AnyError; force?: boolean; forceClear?: boolean; + + /** @internal */ + timeoutMS?: number; } /** @@ -115,7 +122,7 @@ export class ClientSession /** @internal */ sessionPool: ServerSessionPool; hasEnded: boolean; - clientOptions?: MongoOptions; + clientOptions: MongoOptions; supports: { causalConsistency: boolean }; clusterTime?: ClusterTime; operationTime?: Timestamp; @@ -137,6 +144,9 @@ export class ClientSession /** @internal */ timeoutMS?: number; + /** @internal */ + public timeoutContext: TimeoutContext | null = null; + /** * Create a client session. * @internal @@ -149,7 +159,7 @@ export class ClientSession client: MongoClient, sessionPool: ServerSessionPool, options: ClientSessionOptions, - clientOptions?: MongoOptions + clientOptions: MongoOptions ) { super(); @@ -269,8 +279,13 @@ export class ClientSession async endSession(options?: EndSessionOptions): Promise { try { if (this.inTransaction()) { - await this.abortTransaction(); + await this.abortTransaction({ ...options, throwTimeout: true }); } + } catch (error) { + // spec indicates that we should ignore all errors for `endSessions` + if (error.name === 'MongoOperationTimeoutError') throw error; + squashError(error); + } finally { if (!this.hasEnded) { const serverSession = this[kServerSession]; if (serverSession != null) { @@ -286,10 +301,6 @@ export class ClientSession this.hasEnded = true; this.emit('ended', this); } - } catch (error) { - // spec indicates that we should ignore all errors for `endSessions` - squashError(error); - } finally { maybeClearPinnedConnection(this, { force: true, ...options }); } } @@ -441,8 +452,10 @@ export class ClientSession /** * Commits the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. 
*/ - async commitTransaction(): Promise { + async commitTransaction(options?: { timeoutMS?: number }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -492,8 +505,25 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + this.timeoutContext ?? + (typeof timeoutMS === 'number' + ? TimeoutContext.create({ + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS, + timeoutMS + }) + : null); + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (firstCommitError) { if (firstCommitError instanceof MongoError && isRetryableWriteError(firstCommitError)) { @@ -503,7 +533,7 @@ export class ClientSession this.unpin({ force: true }); try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (retryCommitError) { // If the retry failed, we process that error instead of the original @@ -535,8 +565,13 @@ export class ClientSession /** * Aborts the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. */ - async abortTransaction(): Promise { + async abortTransaction(options?: { timeoutMS?: number }): Promise; + /** @internal */ + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise; + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -581,18 +616,45 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : this.timeoutContext?.csotEnabled() + ? this.timeoutContext.timeoutMS // refresh timeoutMS for abort operation + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + timeoutMS != null + ? 
TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); this.unpin(); return; } catch (firstAbortError) { this.unpin(); + if (firstAbortError.name === 'MongoRuntimeError') throw firstAbortError; + if (options?.throwTimeout && firstAbortError.name === 'MongoOperationTimeoutError') { + throw firstAbortError; + } + if (firstAbortError instanceof MongoError && isRetryableWriteError(firstAbortError)) { try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; - } catch { + } catch (secondAbortError) { + if (secondAbortError.name === 'MongoRuntimeError') throw secondAbortError; + if (options?.throwTimeout && secondAbortError.name === 'MongoOperationTimeoutError') { + throw secondAbortError; + } // we do not retry the retry } } @@ -647,96 +709,119 @@ export class ClientSession */ async withTransaction( fn: WithTransactionCallback, - options?: TransactionOptions + options?: TransactionOptions & { + /** + * Configures a timeoutMS expiry for the entire withTransactionCallback. + * + * @remarks + * - The remaining timeout will not be applied to callback operations that do not use the ClientSession. + * - Overriding timeoutMS for operations executed using the explicit session inside the provided callback will result in a client-side error. + */ + timeoutMS?: number; + } ): Promise { const MAX_TIMEOUT = 120000; - const startTime = now(); - - let committed = false; - let result: any; - while (!committed) { - this.startTransaction(options); // may throw on error + const timeoutMS = options?.timeoutMS ?? this.timeoutMS ?? null; + this.timeoutContext = + timeoutMS != null + ? TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; - try { - const promise = fn(this); - if (!isPromiseLike(promise)) { - throw new MongoInvalidArgumentError( - 'Function provided to `withTransaction` must return a Promise' - ); - } + const startTime = this.timeoutContext?.csotEnabled() ? 
this.timeoutContext.start : now(); - result = await promise; + let committed = false; + let result: any; - if ( - this.transaction.state === TxnState.NO_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_COMMITTED || - this.transaction.state === TxnState.TRANSACTION_ABORTED - ) { - // Assume callback intentionally ended the transaction - return result; - } - } catch (fnError) { - if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { - await this.abortTransaction(); - throw fnError; - } + try { + while (!committed) { + this.startTransaction(options); // may throw on error - if ( - this.transaction.state === TxnState.STARTING_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS - ) { - await this.abortTransaction(); - } + try { + const promise = fn(this); + if (!isPromiseLike(promise)) { + throw new MongoInvalidArgumentError( + 'Function provided to `withTransaction` must return a Promise' + ); + } - if ( - fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT - ) { - continue; - } + result = await promise; - throw fnError; - } + if ( + this.transaction.state === TxnState.NO_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_COMMITTED || + this.transaction.state === TxnState.TRANSACTION_ABORTED + ) { + // Assume callback intentionally ended the transaction + return result; + } + } catch (fnError) { + if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { + await this.abortTransaction(); + throw fnError; + } - while (!committed) { - try { - /* - * We will rely on ClientSession.commitTransaction() to - * apply a majority write concern if commitTransaction is - * being retried (see: DRIVERS-601) - */ - await this.commitTransaction(); - committed = true; - } catch (commitError) { - /* - * Note: a maxTimeMS error will have the MaxTimeMSExpired - * code (50) and can be reported as a top-level error or - * inside writeConcernError, ex. - * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } - * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } - */ if ( - !isMaxTimeMSExpiredError(commitError) && - commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && - now() - startTime < MAX_TIMEOUT + this.transaction.state === TxnState.STARTING_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS ) { - continue; + await this.abortTransaction(); } if ( - commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT + fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) ) { - break; + continue; } - throw commitError; + throw fnError; + } + + while (!committed) { + try { + /* + * We will rely on ClientSession.commitTransaction() to + * apply a majority write concern if commitTransaction is + * being retried (see: DRIVERS-601) + */ + await this.commitTransaction(); + committed = true; + } catch (commitError) { + /* + * Note: a maxTimeMS error will have the MaxTimeMSExpired + * code (50) and can be reported as a top-level error or + * inside writeConcernError, ex. 
+ * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } + * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } + */ + if ( + !isMaxTimeMSExpiredError(commitError) && + commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + continue; + } + + if ( + commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + break; + } + + throw commitError; + } } } + return result; + } finally { + this.timeoutContext = null; } - - return result; } } diff --git a/src/timeout.ts b/src/timeout.ts index 297a484b4ec..f057bdb90b4 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,16 +1,19 @@ import { clearTimeout, setTimeout } from 'timers'; import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; +import { type ClientSession } from './sessions'; import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { + duration: number; override get name(): 'TimeoutError' { return 'TimeoutError'; } - constructor(message: string, options?: { cause?: Error }) { + constructor(message: string, options: { cause?: Error; duration: number }) { super(message, options); + this.duration = options.duration; } static is(error: unknown): error is TimeoutError { @@ -52,12 +55,19 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = true) { - let reject!: Reject; + private constructor( + executor: Executor = () => null, + options?: { duration: number; unref?: true; rejection?: Error } + ) { + const duration = options?.duration ?? 
0; + const unref = !!options?.unref; + const rejection = options?.rejection; + if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } + let reject!: Reject; super((_, promiseReject) => { reject = promiseReject; @@ -67,16 +77,20 @@ export class Timeout extends Promise { this.duration = duration; this.start = Math.trunc(performance.now()); - if (this.duration > 0) { + if (rejection == null && this.duration > 0) { this.id = setTimeout(() => { this.ended = Math.trunc(performance.now()); this.timedOut = true; - reject(new TimeoutError(`Expired after ${duration}ms`)); + reject(new TimeoutError(`Expired after ${duration}ms`, { duration })); }, this.duration); if (typeof this.id.unref === 'function' && unref) { // Ensure we do not keep the Node.js event loop running this.id.unref(); } + } else if (rejection != null) { + this.ended = Math.trunc(performance.now()); + this.timedOut = true; + reject(rejection); } } @@ -90,11 +104,11 @@ export class Timeout extends Promise { } throwIfExpired(): void { - if (this.timedOut) throw new TimeoutError('Timed out'); + if (this.timedOut) throw new TimeoutError('Timed out', { duration: this.duration }); } - public static expires(durationMS: number, unref?: boolean): Timeout { - return new Timeout(undefined, durationMS, unref); + public static expires(duration: number, unref?: true): Timeout { + return new Timeout(undefined, { duration, unref }); } static is(timeout: unknown): timeout is Timeout { @@ -107,10 +121,16 @@ export class Timeout extends Promise { typeof timeout.then === 'function' ); } + + static override reject(rejection?: Error): Timeout { + return new Timeout(undefined, { duration: 0, unref: true, rejection }); + } } /** @internal */ -export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; +export type TimeoutContextOptions = (LegacyTimeoutContextOptions | CSOTTimeoutContextOptions) & { + session?: ClientSession; +}; /** @internal */ export type LegacyTimeoutContextOptions = { @@ -151,6 +171,7 @@ function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions /** @internal */ export abstract class TimeoutContext { static create(options: TimeoutContextOptions): TimeoutContext { + if (options.session?.timeoutContext != null) return options.session?.timeoutContext; if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); else throw new MongoRuntimeError('Unrecognized options'); @@ -183,7 +204,7 @@ export class CSOTTimeoutContext extends TimeoutContext { private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; public minRoundTripTime = 0; - private start: number; + public start: number; constructor(options: CSOTTimeoutContextOptions) { super(); @@ -217,8 +238,8 @@ export class CSOTTimeoutContext extends TimeoutContext { if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { const { remainingTimeMS, serverSelectionTimeoutMS } = this; if (remainingTimeMS <= 0) - throw new MongoOperationTimeoutError( - `Timed out in server selection after ${this.timeoutMS}ms` + return Timeout.reject( + new MongoOperationTimeoutError(`Timed out in server selection after ${this.timeoutMS}ms`) ); const usingServerSelectionTimeoutMS = serverSelectionTimeoutMS !== 0 && @@ -258,14 +279,14 @@ export class CSOTTimeoutContext extends TimeoutContext { const { remainingTimeMS 
} = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket write'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket write')); } get timeoutForSocketRead(): Timeout | null { const { remainingTimeMS } = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket read'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } } diff --git a/src/transactions.ts b/src/transactions.ts index 53dcb842084..db251c82c16 100644 --- a/src/transactions.ts +++ b/src/transactions.ts @@ -60,7 +60,7 @@ const COMMITTED_STATES: Set = new Set([ * Configuration options for a transaction. * @public */ -export interface TransactionOptions extends CommandOperationOptions { +export interface TransactionOptions extends Omit { // TODO(NODE-3344): These options use the proper class forms of these settings, it should accept the basic enum values too /** A default read concern for commands in this transaction */ readConcern?: ReadConcernLike; @@ -68,7 +68,10 @@ export interface TransactionOptions extends CommandOperationOptions { writeConcern?: WriteConcern; /** A default read preference for commands in this transaction */ readPreference?: ReadPreferenceLike; - /** Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds */ + /** + * Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds + * @deprecated This option is deprecated in favor of `timeoutMS` or `defaultTimeoutMS`. + */ maxCommitTimeMS?: number; } diff --git a/src/utils.ts b/src/utils.ts index ebc0784cb1f..04174813c9c 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -517,6 +517,10 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. + * + * @param parent - An optional owning class of the operation being run. ex. Db/Collection/MongoClient. + * @param options - The options passed to the operation method. + * * @internal */ export function resolveOptions( @@ -544,9 +548,14 @@ export function resolveOptions( result.readPreference = readPreference; } - const timeoutMS = options?.timeoutMS; + const isConvenientTransaction = session?.explicit && session?.timeoutContext != null; + if (isConvenientTransaction && options?.timeoutMS != null) { + throw new MongoInvalidArgumentError( + 'An operation cannot be given a timeoutMS setting when inside a withTransaction call that has a timeoutMS setting' + ); + } - result.timeoutMS = timeoutMS ?? parent?.timeoutMS; + result.timeoutMS = options?.timeoutMS ?? 
parent?.timeoutMS; return result; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 729bed42199..406aa53ed6a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,6 +1,7 @@ /* Specification prose tests */ import { expect } from 'chai'; +import * as semver from 'semver'; import * as sinon from 'sinon'; import { @@ -9,6 +10,7 @@ import { MongoServerSelectionError, now } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -595,7 +597,10 @@ describe('CSOT spec prose tests', function () { 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context.skip('9. endSession', () => { + describe('9. endSession', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4', topology: ['replicaset', 'sharded'] } + }; /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -625,12 +630,92 @@ describe('CSOT spec prose tests', function () { * 1. Using `session`, execute `session.end_session` * - Expect this to fail with a timeout error after no more than 15ms. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + const internalClient = this.configuration.newClient(); + // End in-progress transactions otherwise "drop" will hang + await internalClient.db('admin').command({ killAllSessions: [] }); + await internalClient + .db('endSession_db') + .collection('endSession_coll') + .drop() + .catch(() => null); + await internalClient.db('endSession_db').createCollection('endSession_coll'); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + const internalClient = this.configuration.newClient(); + await internalClient.db('admin').command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client?.close(); + }); + + describe('when timeoutMS is provided to the client', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient({ timeoutMS: 150, monitorCommands: true }); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when defaultTimeoutMS is provided to startSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = 
client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession({ defaultTimeoutMS: 150 }); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when timeoutMS is provided to endSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession({ timeoutMS: 150 }).catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); }); - context.skip('10. Convenient Transactions', () => { + describe('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=4.4' } + }; - context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { + describe('when an operation fails inside withTransaction callback', () => { /** * 1. Using `internalClient`, drop the `db.coll` collection. * 1. Using `internalClient`, set the following fail point: @@ -641,7 +726,7 @@ describe('CSOT spec prose tests', function () { * data: { * failCommands: ["insert", "abortTransaction"], * blockConnection: true, - * blockTimeMS: 15 + * blockTimeMS: 200 * } * } * ``` @@ -658,6 +743,80 @@ describe('CSOT spec prose tests', function () { * 1. `command_started` and `command_failed` events for an `insert` command. * 1. `command_started` and `command_failed` events for an `abortTransaction` command. 
*/ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it('timeoutMS is refreshed for abortTransaction', metadata, async function () { + if ( + this.configuration.topologyType === 'ReplicaSetWithPrimary' && + semver.satisfies(this.configuration.version, '<=4.4') + ) { + this.skipReason = '4.4 replicaset fail point does not blockConnection for requested time'; + this.skip(); + } + + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 150, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + const withTransactionError = await session + .withTransaction(async session => { + await coll.insertOne({ x: 1 }, { session }); + }) + .catch(error => error); + + try { + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal(['insert', 'abortTransaction']); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + }); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index e4c9eb3027c..a178cecc5d2 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -1,4 +1,5 @@ import { join } from 'path'; +import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; @@ -8,7 +9,10 @@ const enabled = [ 'override-database-timeoutMS', 'override-operation-timeoutMS', 'retryability-legacy-timeouts', - 'retryability-timeoutMS' + 'retryability-timeoutMS', + 'sessions-override-operation-timeoutMS', + 'sessions-override-timeoutMS', + 'sessions-inherit-timeoutMS' ]; const cursorOperations = [ @@ -43,5 +47,15 @@ describe('CSOT spec tests', function () { 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } - runUnifiedSuite(specs); + runUnifiedSuite(specs, (test, configuration) => { + const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + sessionCSOTTests.includes(test.description) && + configuration.topologyType === 
'ReplicaSetWithPrimary' && + semver.satisfies(configuration.version, '<=4.4') + ) { + return '4.4 replicaset fail point does not blockConnection for requested time'; + } + return false; + }); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index d7d4a4ede5a..cc767c1d80a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -12,6 +12,7 @@ import { type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, + MongoInvalidArgumentError, MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; @@ -320,4 +321,153 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { }); }); }); + + describe('when using an explicit session', () => { + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset'], mongodb: '>=4.4' } + }; + + describe('created for a withTransaction callback', () => { + describe('passing a timeoutMS and a session with a timeoutContext', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('throws a validation error from the operation', metadata, async () => { + // Drivers MUST raise a validation error if an explicit session with a timeout is used and + // the timeoutMS option is set at the operation level for operations executed as part of a withTransaction callback. + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll + .insertOne({ x: 1 }, { session, timeoutMS: 1234 }) + .catch(error => error); + throw insertError; + }) + .catch(error => error); + + expect(insertError).to.be.instanceOf(MongoInvalidArgumentError); + expect(withTransactionError).to.be.instanceOf(MongoInvalidArgumentError); + }); + }); + }); + + describe('created manually', () => { + describe('passing a timeoutMS and a session with an inherited timeoutMS', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('does not throw a validation error', metadata, async () => { + const coll = client.db('db').collection('coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session, timeoutMS: 1234 }); + await session.abortTransaction(); // this uses the inherited timeoutMS, not the insert + }); + }); + }); + }); + + describe('Convenient Transactions', () => { + /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. 
*/ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=5.0' } + }; + + describe('when an operation fails inside withTransaction callback', () => { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 600 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it( + 'timeoutMS is refreshed for abortTransaction and the timeout error is thrown from the operation', + metadata, + async function () { + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 500, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll.insertOne({ x: 1 }, { session }).catch(error => error); + throw insertError; + }) + .catch(error => error); + + try { + expect(insertError).to.be.instanceOf(MongoOperationTimeoutError); + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal([ + 'insert', + 'abortTransaction' + ]); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + } + ); + }); + }); }); diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json index abbc3217327..13ea91c7948 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } 
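[Note on the spec changes above: the session-inherited timeoutMS is raised from 50 to 500, the failpoint block times from 60 to 600, and the expected events now include the automatic abortTransaction (carrying a maxTimeMS derived from the remaining budget) after the failed insert. A rough TypeScript usage sketch of the pattern these tests exercise follows; it is not part of the patch. The connection string, database, and collection names are placeholders, and it assumes the client-level `timeoutMS` option and the exported `MongoOperationTimeoutError` introduced by this patch series.]

```ts
import { MongoClient, MongoOperationTimeoutError } from 'mongodb';

// Placeholder URI; timeoutMS is the client-level CSOT budget assumed to be available here.
const client = new MongoClient('mongodb://localhost:27017', { timeoutMS: 500 });

async function main(): Promise<void> {
  // The session inherits timeoutMS: 500 from the client (no per-session override).
  const session = client.startSession();
  try {
    await session.withTransaction(async session => {
      // The callback, commitTransaction, and any automatic abortTransaction all draw
      // from the single 500ms budget established when withTransaction was entered.
      await client.db('db').collection('coll').insertOne({ x: 1 }, { session });
    });
  } catch (error) {
    // If the server stalls (e.g. via a blockConnection failpoint), the transaction as a
    // whole is expected to surface a timeout error once the budget is exhausted.
    if (!(error instanceof MongoOperationTimeoutError)) throw error;
  } finally {
    await session.endSession();
    await client.close();
  }
}

void main();
```

[The expectation that abortTransaction still appears in the command events reflects the behaviour exercised above: even when the callback's insert times out, the driver refreshes the timeout for the abort so the transaction is cleaned up rather than silently dropped client-side.]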
diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml index 184ef7eb9e7..c79384e5f0b 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -52,7 +52,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json index 0254b184a14..441c698328c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml index 8a80a65720a..bee91dc4cb8 100644 --- 
a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml @@ -50,7 +50,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -61,7 +61,7 @@ tests: - name: commitTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectError: isTimeoutError: true expectEvents: @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -106,7 +106,7 @@ tests: - name: abortTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectEvents: - client: *client events: @@ -138,11 +138,11 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 callback: - name: insertOne object: *collection @@ -156,9 +156,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -169,3 +166,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json index c46ae4dd506..d90152e909c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml index 61aaab4d97e..73aaf9ff2a7 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml @@ -29,7 +29,7 @@ createEntities: id: &session session client: *client sessionOptions: - defaultTimeoutMS: 50 + defaultTimeoutMS: 500 initialData: - collectionName: *collectionName @@ -52,7 +52,7 @@ 
tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 9f4e20a828e..7f90e275dc8 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -619,6 +619,10 @@ export class EntitiesMap extends Map { const options = Object.create(null); + if (entity.session.sessionOptions?.defaultTimeoutMS != null) { + options.defaultTimeoutMS = entity.session.sessionOptions?.defaultTimeoutMS; + } + if (entity.session.sessionOptions?.causalConsistency) { options.causalConsistency = entity.session.sessionOptions?.causalConsistency; } diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 90996b9640e..35c274dfbe0 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -512,6 +512,13 @@ function compareCommandFailedEvents( } } +function expectInstanceOf any>( + instance: any, + ctor: T +): asserts instance is InstanceType { + expect(instance).to.be.instanceOf(ctor); +} + function compareEvents( actual: CommandEvent[] | CmapEvent[] | SdamEvent[], expected: (ExpectedCommandEvent & ExpectedCmapEvent & ExpectedSdamEvent)[], @@ -526,9 +533,7 @@ function compareEvents( if (expectedEvent.commandStartedEvent) { const path = `${rootPrefix}.commandStartedEvent`; - if (!(actualEvent instanceof CommandStartedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandStartedEvent`); - } + expectInstanceOf(actualEvent, CommandStartedEvent); compareCommandStartedEvents(actualEvent, expectedEvent.commandStartedEvent, entities, path); if (expectedEvent.commandStartedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); @@ -537,9 +542,7 @@ function compareEvents( } } else if (expectedEvent.commandSucceededEvent) { const path = `${rootPrefix}.commandSucceededEvent`; - if (!(actualEvent instanceof CommandSucceededEvent)) { - expect.fail(`expected ${path} to be instanceof CommandSucceededEvent`); - } + expectInstanceOf(actualEvent, CommandSucceededEvent); compareCommandSucceededEvents( actualEvent, expectedEvent.commandSucceededEvent, @@ -553,9 +556,7 @@ function compareEvents( } } else if (expectedEvent.commandFailedEvent) { const path = `${rootPrefix}.commandFailedEvent`; - if 
(!(actualEvent instanceof CommandFailedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandFailedEvent`); - } + expectInstanceOf(actualEvent, CommandFailedEvent); compareCommandFailedEvents(actualEvent, expectedEvent.commandFailedEvent, entities, path); if (expectedEvent.commandFailedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 7a98c7ac978..5b5b7040698 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -19,6 +19,7 @@ import { ServerType, type TopologyDescription, type TopologyType, + type TransactionOptions, WriteConcern } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -49,11 +50,6 @@ operations.set('createEntities', async ({ entities, operation, testConfig }) => await EntitiesMap.createEntities(testConfig, null, operation.arguments.entities!, entities); }); -operations.set('abortTransaction', async ({ entities, operation }) => { - const session = entities.getEntity('session', operation.object); - return session.abortTransaction(); -}); - operations.set('aggregate', async ({ entities, operation }) => { const dbOrCollection = entities.get(operation.object) as Db | Collection; if (!(dbOrCollection instanceof Db || dbOrCollection instanceof Collection)) { @@ -241,7 +237,12 @@ operations.set('close', async ({ entities, operation }) => { operations.set('commitTransaction', async ({ entities, operation }) => { const session = entities.getEntity('session', operation.object); - return session.commitTransaction(); + return await session.commitTransaction({ timeoutMS: operation.arguments?.timeoutMS }); +}); + +operations.set('abortTransaction', async ({ entities, operation }) => { + const session = entities.getEntity('session', operation.object); + return await session.abortTransaction({ timeoutMS: operation.arguments?.timeoutMS }); }); operations.set('createChangeStream', async ({ entities, operation }) => { @@ -371,7 +372,7 @@ operations.set('insertOne', async ({ entities, operation }) => { // Looping exposes the fact that we can generate _ids for inserted // documents and we don't want the original operation to get modified // and use the same _id for each insert. 
- return collection.insertOne({ ...document }, opts); + return await collection.insertOne({ ...document }, opts); }); operations.set('insertMany', async ({ entities, operation }) => { @@ -718,13 +719,17 @@ operations.set('waitForThread', async ({ entities, operation }) => { operations.set('withTransaction', async ({ entities, operation, client, testConfig }) => { const session = entities.getEntity('session', operation.object); - const options = { + const options: TransactionOptions = { readConcern: ReadConcern.fromOptions(operation.arguments), writeConcern: WriteConcern.fromOptions(operation.arguments), readPreference: ReadPreference.fromOptions(operation.arguments), - maxCommitTimeMS: operation.arguments!.maxCommitTimeMS + maxCommitTimeMS: operation.arguments?.maxCommitTimeMS }; + if (typeof operation.arguments?.timeoutMS === 'number') { + options.timeoutMS = operation.arguments.timeoutMS; + } + await session.withTransaction(async () => { for (const callbackOperation of operation.arguments!.callback) { await executeOperationAndCheck(callbackOperation, entities, client, testConfig, true); @@ -945,7 +950,7 @@ export async function executeOperationAndCheck( rethrow = false ): Promise { const opFunc = operations.get(operation.name); - expect(opFunc, `Unknown operation: ${operation.name}`).to.exist; + if (opFunc == null) expect.fail(`Unknown operation: ${operation.name}`); if (operation.arguments && operation.arguments.session) { // The session could need to be either pulled from the entity map or in the case where @@ -959,7 +964,7 @@ export async function executeOperationAndCheck( let result; try { - result = await opFunc!({ entities, operation, client, testConfig }); + result = await opFunc({ entities, operation, client, testConfig }); } catch (error) { if (operation.expectError) { expectErrorCheck(error, operation.expectError, entities); From 7b4aa8496e2d0a80762a2e3343e547df99011106 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 11:35:27 -0400 Subject: [PATCH 039/136] feat(NODE-6304): add CSOT support for non-tailable cursors (#4195) --- src/cmap/connection.ts | 4 +- src/cmap/wire_protocol/on_data.ts | 1 - src/collection.ts | 6 +- src/cursor/abstract_cursor.ts | 146 +++- src/cursor/aggregation_cursor.ts | 20 +- src/cursor/change_stream_cursor.ts | 6 +- src/cursor/find_cursor.ts | 2 +- src/cursor/list_collections_cursor.ts | 2 +- src/cursor/list_indexes_cursor.ts | 2 +- src/cursor/run_command_cursor.ts | 14 +- src/index.ts | 2 +- src/operations/aggregate.ts | 4 + src/operations/execute_operation.ts | 3 +- src/operations/find.ts | 4 + src/operations/indexes.ts | 9 +- src/operations/list_collections.ts | 3 + src/operations/operation.ts | 3 + src/operations/run_command.ts | 2 + src/sessions.ts | 12 +- src/timeout.ts | 27 +- ...ient_side_operations_timeout.prose.test.ts | 84 ++- ...lient_side_operations_timeout.spec.test.ts | 83 ++- .../node_csot.test.ts | 335 ++++++++- .../command-execution.json | 153 ++++ .../client-side-operations-timeout/README.md | 661 ++++++++++++++++++ .../change-streams.json | 20 +- .../change-streams.yml | 30 +- .../close-cursors.json | 12 +- .../close-cursors.yml | 12 +- .../command-execution.json | 2 +- .../command-execution.yml | 5 +- .../convenient-transactions.json | 22 +- .../convenient-transactions.yml | 15 +- .../deprecated-options.json | 2 +- .../deprecated-options.yml | 2 +- .../gridfs-advanced.yml | 2 +- .../non-tailable-cursors.json | 20 +- .../non-tailable-cursors.yml | 32 +- .../retryability-timeoutMS.json | 250 +++++++ 
.../retryability-timeoutMS.yml | 100 +++ .../tailable-awaitData.json | 14 +- .../tailable-awaitData.yml | 18 +- .../tailable-non-awaitData.json | 10 +- .../tailable-non-awaitData.yml | 12 +- test/tools/unified-spec-runner/operations.ts | 7 +- test/unit/cursor/aggregation_cursor.test.ts | 67 +- 46 files changed, 2008 insertions(+), 234 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json create mode 100644 test/spec/client-side-operations-timeout/README.md diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 7ad367e6733..507615e9f03 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -86,6 +86,7 @@ export interface CommandOptions extends BSONSerializeOptions { documentsReturnedIn?: string; noResponse?: boolean; omitReadPreference?: boolean; + omitMaxTimeMS?: boolean; // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint // from executeOperation that the txnNum should be applied to this command. @@ -421,7 +422,7 @@ export class Connection extends TypedEventEmitter { ...options }; - if (options.timeoutContext?.csotEnabled()) { + if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { const { maxTimeMS } = options.timeoutContext; if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } @@ -621,7 +622,6 @@ export class Connection extends TypedEventEmitter { for await (const document of this.sendCommand(ns, command, options, responseType)) { if (options.timeoutContext?.csotEnabled()) { if (MongoDBResponse.is(document)) { - // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT if (document.isMaxTimeExpiredError) { throw new MongoOperationTimeoutError('Server reported a timeout error', { cause: new MongoServerError(document.toObject()) diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 23fd88e2828..64c636f41f1 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -93,7 +93,6 @@ export function onData( const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; timeoutForSocketRead?.throwIfExpired(); - // eslint-disable-next-line github/no-then timeoutForSocketRead?.then(undefined, errorHandler); return iterator; diff --git a/src/collection.ts b/src/collection.ts index f3a206b0c7b..a73a5276f5f 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -11,7 +11,7 @@ import { type ListSearchIndexesOptions } from './cursor/list_search_indexes_cursor'; import type { Db } from './db'; -import { MongoInvalidArgumentError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError } from './error'; import type { MongoClient, PkFactory } from './mongo_client'; import type { Filter, @@ -678,7 +678,9 @@ export class Collection { new DropIndexOperation(this as TODO_NODE_3286, '*', resolveOptions(this, options)) ); return true; - } catch { + } catch (error) { + if (error instanceof MongoOperationTimeoutError) throw error; // TODO: Check the spec for index management behaviour/file a drivers ticket for this + // Seems like we should throw all errors return false; } } diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 51206b51a27..d0f386923ad 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,6 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from 
'../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; +import { TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -60,6 +61,17 @@ export interface CursorStreamOptions { /** @public */ export type CursorFlag = (typeof CURSOR_FLAGS)[number]; +/** @public*/ +export const CursorTimeoutMode = Object.freeze({ + ITERATION: 'iteration', + LIFETIME: 'cursorLifetime' +} as const); + +/** @public + * TODO(NODE-5688): Document and release + * */ +export type CursorTimeoutMode = (typeof CursorTimeoutMode)[keyof typeof CursorTimeoutMode]; + /** @public */ export interface AbstractCursorOptions extends BSONSerializeOptions { session?: ClientSession; @@ -105,6 +117,8 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { noCursorTimeout?: boolean; /** @internal TODO(NODE-5688): make this public */ timeoutMS?: number; + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ @@ -117,6 +131,8 @@ export type InternalAbstractCursorOptions = Omit { - await this.cleanup(); + async close(options?: { timeoutMS?: number }): Promise { + await this.cleanup(options?.timeoutMS); } /** @@ -658,6 +727,8 @@ export abstract class AbstractCursor< this.cursorId = null; this.documents?.clear(); + this.timeoutContext?.clear(); + this.timeoutContext = undefined; this.isClosed = false; this.isKilled = false; this.initialized = false; @@ -707,7 +778,7 @@ export abstract class AbstractCursor< } ); - return await executeOperation(this.cursorClient, getMoreOperation); + return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); } /** @@ -718,6 +789,12 @@ export abstract class AbstractCursor< * a significant refactor. 
*/ private async cursorInit(): Promise { + if (this.cursorOptions.timeoutMS != null) { + this.timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }); + } try { const state = await this._initialize(this.cursorSession); const response = state.response; @@ -729,7 +806,7 @@ export abstract class AbstractCursor< } catch (error) { // the cursor is now initialized, even if an error occurred this.initialized = true; - await this.cleanup(error); + await this.cleanup(undefined, error); throw error; } @@ -763,6 +840,7 @@ export abstract class AbstractCursor< // otherwise need to call getMore const batchSize = this.cursorOptions.batchSize || 1000; + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; try { const response = await this.getMore(batchSize); @@ -770,7 +848,7 @@ export abstract class AbstractCursor< this.documents = response; } catch (error) { try { - await this.cleanup(error); + await this.cleanup(undefined, error); } catch (error) { // `cleanupCursor` should never throw, squash and throw the original error squashError(error); @@ -791,7 +869,7 @@ export abstract class AbstractCursor< } /** @internal */ - private async cleanup(error?: Error) { + private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; try { @@ -806,11 +884,23 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; + let timeoutContext: TimeoutContext | undefined; + if (timeoutMS != null) { + this.timeoutContext?.clear(); + timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }); + } else { + this.timeoutContext?.refresh(); + timeoutContext = this.timeoutContext; + } await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session - }) + }), + timeoutContext ); } } catch (error) { diff --git a/src/cursor/aggregation_cursor.ts b/src/cursor/aggregation_cursor.ts index 9762c8a03bf..056f28454ce 100644 --- a/src/cursor/aggregation_cursor.ts +++ b/src/cursor/aggregation_cursor.ts @@ -1,4 +1,5 @@ import type { Document } from '../bson'; +import { MongoAPIError } from '../error'; import type { ExplainCommandOptions, ExplainVerbosityLike } from '../explain'; import type { MongoClient } from '../mongo_client'; import { AggregateOperation, type AggregateOptions } from '../operations/aggregate'; @@ -9,6 +10,7 @@ import { mergeOptions, type MongoDBNamespace } from '../utils'; import { AbstractCursor, type AbstractCursorOptions, + CursorTimeoutMode, type InitialCursorResponse } from './abstract_cursor'; @@ -38,6 +40,15 @@ export class AggregationCursor extends AbstractCursor { this.pipeline = pipeline; this.aggregateOptions = options; + + const lastStage: Document | undefined = this.pipeline[this.pipeline.length - 1]; + + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (lastStage?.$merge != null || lastStage?.$out != null) + ) + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); } clone(): AggregationCursor { @@ -60,7 +71,7 @@ export class AggregationCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, aggregateOperation); + const response = await executeOperation(this.client, aggregateOperation, 
this.timeoutContext); return { server: aggregateOperation.server, session, response }; } @@ -95,6 +106,13 @@ export class AggregationCursor extends AbstractCursor { addStage(stage: Document): AggregationCursor; addStage(stage: Document): AggregationCursor { this.throwIfInitialized(); + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (stage.$out != null || stage.$merge != null) + ) { + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); + } this.pipeline.push(stage); return this as unknown as AggregationCursor; } diff --git a/src/cursor/change_stream_cursor.ts b/src/cursor/change_stream_cursor.ts index b42ce3e1302..13f58675552 100644 --- a/src/cursor/change_stream_cursor.ts +++ b/src/cursor/change_stream_cursor.ts @@ -133,7 +133,11 @@ export class ChangeStreamCursor< session }); - const response = await executeOperation(session.client, aggregateOperation); + const response = await executeOperation( + session.client, + aggregateOperation, + this.timeoutContext + ); const server = aggregateOperation.server; this.maxWireVersion = maxWireVersion(server); diff --git a/src/cursor/find_cursor.ts b/src/cursor/find_cursor.ts index 83a12818bd0..96b764dc7ff 100644 --- a/src/cursor/find_cursor.ts +++ b/src/cursor/find_cursor.ts @@ -69,7 +69,7 @@ export class FindCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, findOperation); + const response = await executeOperation(this.client, findOperation, this.timeoutContext); // the response is not a cursor when `explain` is enabled this.numReturned = response.batchSize; diff --git a/src/cursor/list_collections_cursor.ts b/src/cursor/list_collections_cursor.ts index a529709556d..9b69de1b935 100644 --- a/src/cursor/list_collections_cursor.ts +++ b/src/cursor/list_collections_cursor.ts @@ -41,7 +41,7 @@ export class ListCollectionsCursor< session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/list_indexes_cursor.ts b/src/cursor/list_indexes_cursor.ts index 799ddf5bdb5..0f768f3b699 100644 --- a/src/cursor/list_indexes_cursor.ts +++ b/src/cursor/list_indexes_cursor.ts @@ -30,7 +30,7 @@ export class ListIndexesCursor extends AbstractCursor { session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 78b9826b9b1..6b31ce2263a 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -9,12 +9,20 @@ import type { ReadConcernLike } from '../read_concern'; import type { ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; import { ns } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { + AbstractCursor, + type CursorTimeoutMode, + type InitialCursorResponse +} from './abstract_cursor'; /** @public */ export type RunCursorCommandOptions = { readPreference?: ReadPreferenceLike; session?: ClientSession; + /** @internal */ + timeoutMS?: number; + /** @internal */ + timeoutMode?: CursorTimeoutMode; } & BSONSerializeOptions; /** 
@public */ @@ -105,7 +113,7 @@ export class RunCommandCursor extends AbstractCursor { responseType: CursorResponse }); - const response = await executeOperation(this.client, operation); + const response = await executeOperation(this.client, operation, this.timeoutContext); return { server: operation.server, @@ -123,6 +131,6 @@ export class RunCommandCursor extends AbstractCursor { ...this.getMoreOptions }); - return await executeOperation(this.client, getMoreOperation); + return await executeOperation(this.client, getMoreOperation, this.timeoutContext); } } diff --git a/src/index.ts b/src/index.ts index 218c5f9e4cf..7f948f30ed4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -108,7 +108,7 @@ export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypt export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi'; export { AuthMechanism } from './cmap/auth/providers'; export { Compressor } from './cmap/wire_protocol/compression'; -export { CURSOR_FLAGS } from './cursor/abstract_cursor'; +export { CURSOR_FLAGS, type CursorTimeoutMode } from './cursor/abstract_cursor'; export { MongoErrorLabel } from './error'; export { ExplainVerbosity } from './explain'; export { ServerApiVersion } from './mongo_client'; diff --git a/src/operations/aggregate.ts b/src/operations/aggregate.ts index 50494cbba73..096fe372715 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -36,6 +37,9 @@ export interface AggregateOptions extends CommandOperationOptions { let?: Document; out?: string; + + /** @internal */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index f9d9f9b63b4..dd9ba06c514 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -280,8 +280,7 @@ async function tryOperation< previousOperationError = operationError; // Reset timeouts - timeoutContext.serverSelectionTimeout?.clear(); - timeoutContext.connectionCheckoutTimeout?.clear(); + timeoutContext.clear(); } } diff --git a/src/operations/find.ts b/src/operations/find.ts index 5f359324d56..c39695cc0bc 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; @@ -64,6 +65,9 @@ export interface FindOptions * @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored. 
*/ oplogReplay?: boolean; + + /** @internal*/ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index c96a5d73453..220d438d834 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -1,7 +1,7 @@ import type { Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Collection } from '../collection'; -import { type AbstractCursorOptions } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; @@ -360,7 +360,12 @@ export class DropIndexOperation extends CommandOperation { } /** @public */ -export type ListIndexesOptions = AbstractCursorOptions; +export type ListIndexesOptions = AbstractCursorOptions & { + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; + /** @internal */ + omitMaxTimeMS?: boolean; +}; /** @internal */ export class ListIndexesOperation extends CommandOperation { diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index 702db0fe3f2..50df243a3ff 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -1,5 +1,6 @@ import type { Binary, Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -16,6 +17,8 @@ export interface ListCollectionsOptions extends Omit { public readonly start: number; public ended: number | null = null; public duration: number; - public timedOut = false; + private timedOut = false; public cleared = false; get remainingTime(): number { @@ -100,6 +100,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.timedOut = false; this.cleared = true; } @@ -190,6 +191,10 @@ export abstract class TimeoutContext { abstract get timeoutForSocketRead(): Timeout | null; abstract csotEnabled(): this is CSOTTimeoutContext; + + abstract refresh(): void; + + abstract clear(): void; } /** @internal */ @@ -288,6 +293,18 @@ export class CSOTTimeoutContext extends TimeoutContext { if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } + + refresh(): void { + this.start = Math.trunc(performance.now()); + this.minRoundTripTime = 0; + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } + + clear(): void { + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } } /** @internal */ @@ -326,4 +343,12 @@ export class LegacyTimeoutContext extends TimeoutContext { get timeoutForSocketRead(): Timeout | null { return null; } + + refresh(): void { + return; + } + + clear(): void { + return; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 406aa53ed6a..0d36998fd96 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ 
b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -4,7 +4,9 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { type CommandStartedEvent } from '../../../mongodb'; import { + type CommandSucceededEvent, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -216,12 +218,52 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('5. Blocking Iteration Methods', () => { + context('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an * error occurs. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 20 + } + }; + let internalClient: MongoClient; + let client: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient.db('db').dropCollection('coll'); + // Creating capped collection to be able to create tailable find cursor + const coll = await internalClient + .db('db') + .createCollection('coll', { capped: true, size: 1_000_000 }); + await coll.insertOne({ x: 1 }); + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); context('Tailable cursors', () => { /** @@ -248,6 +290,29 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ + + it.skip('send correct number of finds and getMores', async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { tailable: true, awaitData: true }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + // Check that there are no getMores sent + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(0); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + // Expect 1 find + expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); + // Expect 2 getMore + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6305)'; }); context('Change Streams', () => { @@ -272,6 +337,23 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ + it.skip('sends correct number of aggregate and getMores', async function () { + const changeStream = client.db('db').collection('coll').watch(); + const maybeError = await changeStream.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + const aggregates = commandStarted + .filter(e => e.command.aggregate != null) + .map(e => e.command); + const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); + // Expect 1 aggregate + expect(aggregates).to.have.lengthOf(1); + // Expect 1 getMore + expect(getMores).to.have.lengthOf(1); + }).skipReason = 'TODO(NODE-6305)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index a178cecc5d2..99914fa08e7 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -4,49 +4,55 @@ import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -const enabled = [ - 'override-collection-timeoutMS', - 'override-database-timeoutMS', - 'override-operation-timeoutMS', - 'retryability-legacy-timeouts', - 'retryability-timeoutMS', - 'sessions-override-operation-timeoutMS', - 'sessions-override-timeoutMS', - 'sessions-inherit-timeoutMS' -]; +const skippedSpecs = { + bulkWrite: 'TODO(NODE-6274)', + 'change-streams': 'TODO(NODE-6035)', + 'convenient-transactions': 'TODO(NODE-5687)', + 'deprecated-options': 'TODO(NODE-5689)', + 'gridfs-advanced': 'TODO(NODE-6275)', + 'gridfs-delete': 'TODO(NODE-6275)', + 'gridfs-download': 'TODO(NODE-6275)', + 'gridfs-find': 'TODO(NODE-6275)', + 'gridfs-upload': 'TODO(NODE-6275)', + 'tailable-awaitData': 'TODO(NODE-6035)', + 'tailable-non-awaitData': 'TODO(NODE-6035)' +}; -const cursorOperations = [ - 'aggregate', - 'countDocuments', - 'listIndexes', - 'createChangeStream', - 'listCollections', - 'listCollectionNames' -]; - -const bulkWriteOperations = [ - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' -]; +const skippedTests = { + 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': + 'TODO(NODE-6305)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': + 'TODO(NODE-6274)', + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': + 'TODO(NODE-6274)', + 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', + 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': + 'TODO(DRIVERS-2965)', + 'Non-tailable cursor lifetime remaining timeoutMS applied to 
getMore if timeoutMode is unset': + 'TODO(DRIVERS-2965)', + 'maxTimeMS value in the command is less than timeoutMS': + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' +}; describe('CSOT spec tests', function () { - const specs = loadSpecTests(join('client-side-operations-timeout')); + const specs = loadSpecTests('client-side-operations-timeout'); for (const spec of specs) { for (const test of spec.tests) { - // not one of the test suites listed in kickoff - if (!enabled.includes(spec.name)) { - test.skipReason = 'TODO(NODE-5684): Not working yet'; + if (skippedSpecs[spec.name] != null) { + test.skipReason = skippedSpecs[spec.name]; + } + if (skippedTests[test.description] != null) { + test.skipReason = skippedTests[test.description]; } - - // Cursor operation - if (test.operations.find(operation => cursorOperations.includes(operation.name))) - test.skipReason = 'TODO(NODE-5684): Not working yet'; - - if (bulkWriteOperations.includes(test.description)) - test.skipReason = - 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } + runUnifiedSuite(specs, (test, configuration) => { const sessionCSOTTests = ['timeoutMS applied to withTransaction']; if ( @@ -59,3 +65,10 @@ describe('CSOT spec tests', function () { return false; }); }); + +describe('CSOT modified spec tests', function () { + const specs = loadSpecTests( + join('..', 'integration', 'client-side-operations-timeout', 'unified-csot-node-specs') + ); + runUnifiedSuite(specs); +}); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index cc767c1d80a..f5ada7eef9f 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,6 @@ /* Anything javascript specific relating to timeouts */ +import { setTimeout } from 'node:timers/promises'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -7,6 +9,9 @@ import { BSON, type ClientSession, type Collection, + type CommandFailedEvent, + type CommandStartedEvent, + type CommandSucceededEvent, Connection, type Db, type FindCursor, @@ -18,7 +23,9 @@ import { } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { +const metadata = { requires: { mongodb: '>=4.4' } }; + +describe('CSOT driver tests', metadata, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -171,8 +178,8 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('server-side maxTimeMS errors are transformed', () => { let client: MongoClient; - let commandsSucceeded; - let commandsFailed; + let commandsSucceeded: CommandSucceededEvent[]; + let commandsFailed: CommandFailedEvent[]; beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); @@ -221,18 +228,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command failed', async () => { - const error = await client - .db() - .command({ ping: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); 
- expect(error.cause).to.have.property('code', 50); - - expect(commandsFailed).to.have.lengthOf(1); - expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command failed', + metadata, + async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { @@ -267,18 +278,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { afterEach(() => sinon.restore()); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client - .db('admin') - .command({ giveMeWriteErrors: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { @@ -306,22 +321,266 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + } + ); + }); + }); + + describe('Non-Tailable cursors', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await 
internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('ITERATION mode', () => { + context('when executing an operation', () => { + it( + 'must apply the configured timeoutMS to the initial operation execution', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 3, timeoutMode: 'iteration', timeoutMS: 10 }) + .limit(3); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + + it('refreshes the timeout for any getMores', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + + // Iterating over 3 documents in the collection, each artificially taking ~50 ms due to failpoint. If timeoutMS is not refreshed, then we'd expect to error + for await (const doc of cursor) { + expect(doc).to.deep.equal({ x: 1 }); + } + + const finds = commandSucceeded.filter(ev => ev.commandName === 'find'); + const getMores = commandSucceeded.filter(ev => ev.commandName === 'getMore'); + + expect(finds).to.have.length(1); // Expecting 1 find + expect(getMores).to.have.length(3); // Expecting 3 getMores (including final empty getMore) + }); + + it( + 'does not append a maxTimeMS to the original command or getMores', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + await cursor.toArray(); + + expect(commandStarted).to.have.length.gte(3); // Find and 2 getMores + expect( + commandStarted.filter(ev => { + return ( + ev.command.find != null && + ev.command.getMore != null && + ev.command.maxTimeMS != null + ); + }) + ).to.have.lengthOf(0); + } + ); + }); + }); + + context('LIFETIME mode', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient .db() - 
.collection('a') - .insertOne({}) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeConcernError.code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + context('when executing a next call', () => { + context( + 'when there are documents available from previously retrieved batch and timeout has expired', + () => { + it('returns documents without error', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.be.gt(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.not.be.instanceOf(MongoOperationTimeoutError); + expect(docOrErr).to.be.deep.equal({ x: 1 }); + }); + } + ); + context('when a getMore is required and the timeout has expired', () => { + it('throws a MongoOperationTimeoutError', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + + .project({ _id: 0 }); + + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.equal(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + it('does not apply maxTimeMS to a getMore', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 1000 }) + .project({ _id: 0 }); + + for await (const _doc of cursor) { + // Ignore _doc + } + + const getMores = commandStarted + .filter(ev => ev.command.getMore != null) + .map(ev => ev.command); + expect(getMores.length).to.be.gt(0); + + for (const getMore of getMores) { + expect(getMore.maxTimeMS).to.not.exist; + } + }); }); }); }); + describe.skip('Tailable non-awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe.skip('Tailable awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json new file mode 100644 index 00000000000..dd6fcb2cf84 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json @@ -0,0 +1,153 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + 
"initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1500 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 500 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/client-side-operations-timeout/README.md b/test/spec/client-side-operations-timeout/README.md new file mode 100644 index 00000000000..a960c2de219 --- /dev/null +++ b/test/spec/client-side-operations-timeout/README.md @@ -0,0 +1,661 @@ +# Client Side Operations Timeouts Tests + +______________________________________________________________________ + +## Introduction + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +## Spec Tests + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of +them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute these +tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another +with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and +`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters. + +## Prose Tests + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). 
Any fail points set during a +test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests +MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to +listen for `command_started` events. + +### 1. Multi-batch inserts + +This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an +exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow +for differing bulk encoding performance. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call. + + - Expect this to fail with a timeout error. + +5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. + +### 2. maxTimeMS is not set for commands sent to mongocryptd + +This test MUST only be run against enterprise server versions 4.2 and higher. + +1. Launch a mongocryptd process on 23000. +2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. +3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. +4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. + +### 3. ClientEncryption + +Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY` +refers to the following base64: + +```javascript +Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk +``` + +For each test, perform the following setup: + +1. Using `internalClient`, drop and create the `keyvault.datakeys` collection. + +2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`. + +3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). Configure this + object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map: + + ```javascript + { + "local": { "key": } + } + ``` + +#### createDataKey + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to fail with a timeout error. + +3. Verify that an `insert` command was executed against to `keyvault.datakeys` as part of the `createDataKey` call. + +#### encrypt + +1. Call `client_encryption.createDataKey()` with the `local` KMS provider. + + - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. 
Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`. + + - Expect this to fail with a timeout error. + +4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. + +#### decrypt + +1. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`. + +2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`. + + - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`. + +3. Close and re-create the `keyVaultClient` and `clientEncryption` objects. + +4. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +5. Call `clientEncryption.decrypt()` with the value `encrypted`. + + - Expect this to fail with a timeout error. + +6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. + +### 4. Background Connection Pooling + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields +(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for +some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are +not published within that time. + +#### timeoutMS used for handshake commands + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 10 + - `appName` of `timeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published. + +#### timeoutMS is refreshed for each handshake command + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 20 + - `appName` of `refreshTimeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published. + +### 5. Blocking Iteration Methods + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an +error occurs. + +#### Tailable cursors + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`. + +3. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`. + + - Expect this to succeed and return a cursor with a non-zero ID. + +6. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command. + +7. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the + test. + +#### Change Streams + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +4. Using `client`, use the `watch` helper to create a change stream against `db.coll`. + + - Expect this to succeed and return a change stream with a non-zero ID. + +5. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during + the test. + +### 6. GridFS - Upload + +Tests in this section MUST only be run against server versions 4.4 and higher. + +#### uploads via openUploadStream can be timed out + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload a single `0x12` byte. + +7. Call `uploadStream.close()` to flush the stream and insert chunks. + + - Expect this to fail with a timeout error. + +#### Aborting an upload stream can be timed out + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with + `chunkSizeBytes=2`. + +5. 
Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`. + +7. Call `uploadStream.abort()`. + + - Expect this to fail with a timeout error. + +### 7. GridFS - Download + +This test MUST only be run against server versions 4.4 and higher. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, insert the following document into the `db.fs.files` collection: + + ```javascript + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream + (referred to as `downloadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +7. Read from the `downloadStream`. + + - Expect this to fail with a timeout error. + +8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against + `db.fs.chunks`. + +### 8. Server Selection + +#### serverSelectionTimeoutMS honored if timeoutMS is not set + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. +2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0 + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. 
a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +### 9. endSession + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run +three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via +the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the +`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient + (referred to as `session`). + +4. Execute the following code: + + ```typescript + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + ``` + +5. Using `session`, execute `session.end_session` + + - Expect this to fail with a timeout error after no more than 15ms. + +### 10. Convenient Transactions + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +#### timeoutMS is refreshed for abortTransaction if the callback fails + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession + derived from that MongoClient (referred to as `session`). + +4. Using `session`, execute a `withTransaction` operation with the following callback: + + ```typescript + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + ``` + +5. Expect the previous `withTransaction` call to fail with a timeout error. + +6. 
Verify that the following events were published during the `withTransaction` call: + + 1. `command_started` and `command_failed` events for an `insert` command. + 2. `command_started` and `command_failed` events for an `abortTransaction` command. + +### 11. Multi-batch bulkWrites + +This test MUST only run against server versions 8.0+. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["bulkWrite"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + in the response. + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +5. Create a list of write models (referred to as `models`) with the following write model repeated + (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + + ```json + InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + } + ``` + +6. Call `bulkWrite` on `client` with `models`. + + - Expect this to fail with a timeout error. + +7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. + +## Unit Tests + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set. +- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a + server has been selected. +- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server + has been selected. +- If a new connection is required to execute an operation, + `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment. +- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should + apply to HTTP requests against OCSP responders. +- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts. +- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment. 
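+
+## Appendix: Non-Normative Sketches
+
+The sketches below are illustrative only and are not part of the test requirements above; the class, function, and
+error names are hypothetical stand-ins rather than driver or spec APIs. They exist to make the timing arithmetic that
+these tests exercise easier to follow.
+
+The unit-test expectations above mostly reduce to one piece of bookkeeping: an operation records its start time once,
+and every later phase asks how much of `timeoutMS` remains, optionally capping that value with the phase's own limit
+(for example `connectTimeoutMS` for socket establishment, or 5 seconds for OCSP requests). A minimal sketch, assuming a
+single operation-wide deadline:
+
+```typescript
+import { performance } from 'node:perf_hooks';
+
+// Hypothetical helper: tracks one operation-wide deadline derived from timeoutMS.
+class OperationDeadline {
+  private readonly start = performance.now();
+
+  constructor(private readonly timeoutMS: number) {}
+
+  /** Milliseconds left before the operation-wide deadline. */
+  get remainingTimeMS(): number {
+    return this.timeoutMS - (performance.now() - this.start);
+  }
+
+  /** Throw if the budget is exhausted; a driver surfaces this as its timeout error type. */
+  ensureTimeRemaining(phase: string): void {
+    if (this.timeoutMS !== 0 && this.remainingTimeMS <= 0) {
+      throw new Error(`Timed out before ${phase}`);
+    }
+  }
+
+  /** Budget for a phase that also has its own cap, e.g. min(remaining, connectTimeoutMS). */
+  budgetFor(phaseLimitMS: number): number {
+    if (this.timeoutMS === 0) return phaseLimitMS; // timeoutMS=0 means "no operation-wide deadline"
+    if (phaseLimitMS === 0) return this.remainingTimeMS; // the phase itself has no cap
+    return Math.min(this.remainingTimeMS, phaseLimitMS);
+  }
+}
+```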
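+
+The four "Server Selection" cases above all expect the lower of the two limits to win, with `timeoutMS=0` (or an unset
+`timeoutMS`) meaning "no operation deadline". A purely illustrative sketch of that selection, with usage lines mirroring
+the four prose tests:
+
+```typescript
+// Hypothetical helper: the effective server-selection budget implied by the Server Selection prose tests.
+function serverSelectionBudgetMS(
+  timeoutMS: number | undefined,
+  serverSelectionTimeoutMS: number
+): number {
+  if (timeoutMS == null || timeoutMS === 0) return serverSelectionTimeoutMS;
+  return Math.min(timeoutMS, serverSelectionTimeoutMS);
+}
+
+serverSelectionBudgetMS(undefined, 10); // 10 - serverSelectionTimeoutMS honored if timeoutMS is not set
+serverSelectionBudgetMS(10, 20);        // 10 - timeoutMS honored when it is lower
+serverSelectionBudgetMS(20, 10);        // 10 - serverSelectionTimeoutMS honored when it is lower
+serverSelectionBudgetMS(0, 10);         // 10 - serverSelectionTimeoutMS honored when timeoutMS=0
+```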
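+
+The "maxTimeMS value in the command is less than timeoutMS" expectation (asserted with `"maxTimeMS": { "$$lte": 500 }`
+in the accompanying command-execution test) and the "command is not sent if RTT is greater than timeoutMS" case both
+follow from how the remaining budget is converted into a server-side `maxTimeMS`: subtract the server's minimum
+observed round-trip time, and refuse to send the command at all if nothing is left. A hedged sketch, assuming the
+remaining time and minimum round-trip time are tracked as in the first sketch:
+
+```typescript
+// Hypothetical helper: derive the maxTimeMS to attach to a command under CSOT.
+function deriveMaxTimeMS(remainingTimeMS: number, minRoundTripTimeMS: number): number {
+  const maxTimeMS = remainingTimeMS - minRoundTripTimeMS;
+  if (maxTimeMS <= 0) {
+    // The command is not sent when the expected round trip alone would exceed the remaining budget.
+    throw new Error('Timed out before the command could be sent');
+  }
+  return maxTimeMS;
+}
+```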
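+
+Finally, the non-tailable cursor tests in this patch exercise two timeout modes: `iteration`, where the `timeoutMS`
+budget is reset before the initial command and before every `getMore`, and `cursorLifetime`, where one budget covers
+the cursor from the initial command until it is exhausted and is only checked when another round trip is actually
+required. A rough, non-normative sketch of the difference; `fetchBatch` is a hypothetical stand-in for issuing the
+initial command or a `getMore`, and handling of `timeoutMS=0` ("no deadline") is omitted:
+
+```typescript
+import { performance } from 'node:perf_hooks';
+
+type CursorTimeoutMode = 'iteration' | 'cursorLifetime';
+
+async function drainCursor<T>(
+  fetchBatch: () => Promise<T[] | null>, // resolves null once the cursor is exhausted
+  timeoutMS: number,
+  mode: CursorTimeoutMode
+): Promise<T[]> {
+  const docs: T[] = [];
+  let deadline = performance.now() + timeoutMS; // cursorLifetime: one budget for the whole cursor
+  while (true) {
+    if (mode === 'iteration') deadline = performance.now() + timeoutMS; // iteration: refresh per round trip
+    if (performance.now() >= deadline) {
+      throw new Error('cursor timed out'); // surfaced as the driver's timeout error type
+    }
+    const batch = await fetchBatch();
+    if (batch === null) return docs;
+    docs.push(...batch);
+  }
+}
+```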
diff --git a/test/spec/client-side-operations-timeout/change-streams.json b/test/spec/client-side-operations-timeout/change-streams.json index aef77bb452d..8cffb08e267 100644 --- a/test/spec/client-side-operations-timeout/change-streams.json +++ b/test/spec/client-side-operations-timeout/change-streams.json @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/spec/client-side-operations-timeout/change-streams.yml b/test/spec/client-side-operations-timeout/change-streams.yml index b2a052d01b2..c813be035ac 100644 --- a/test/spec/client-side-operations-timeout/change-streams.yml +++ b/test/spec/client-side-operations-timeout/change-streams.yml @@ -67,12 +67,12 @@ tests: data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 55 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 50 + timeoutMS: 200 expectError: isTimeoutError: true expectEvents: @@ -142,12 +142,12 @@ tests: data: failCommands: ["aggregate", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 maxAwaitTimeMS: 1 saveResultAsEntity: &changeStream changeStream @@ -171,16 +171,16 @@ tests: maxTimeMS: 1 # The timeout should be applied to the entire resume attempt, not individually to each command. The test creates a - # change stream with timeoutMS=20 which returns an empty initial batch and then sets a fail point to block both - # getMore and aggregate for 12ms each and fail with a resumable error. When the resume attempt happens, the getMore - # and aggregate block for longer than 20ms total, so it times out. + # change stream with timeoutMS=200 which returns an empty initial batch and then sets a fail point to block both + # getMore and aggregate for 120ms each and fail with a resumable error. When the resume attempt happens, the getMore + # and aggregate block for longer than 200ms total, so it times out. 
- description: "timeoutMS applies to full resume attempt in a next call" operations: - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - name: failPoint object: testRunner @@ -192,7 +192,7 @@ tests: data: failCommands: ["getMore", "aggregate"] blockConnection: true - blockTimeMS: 12 + blockTimeMS: 120 errorCode: 7 # HostNotFound - resumable but does not require an SDAM state change. # failCommand doesn't correctly add the ResumableChangeStreamError by default. It needs to be specified # manually here so the error is considered resumable. The failGetMoreAfterCursorCheckout fail point @@ -234,9 +234,9 @@ tests: # Specify a short maxAwaitTimeMS because otherwise the getMore on the new cursor will wait for 1000ms and # time out. maxAwaitTimeMS: 1 - timeoutMS: 100 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - # Block getMore for 150ms to force the next() call to time out. + # Block getMore for 250ms to force the next() call to time out. - name: failPoint object: testRunner arguments: @@ -247,7 +247,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 150 + blockTimeMS: 250 # The original aggregate didn't return any events so this should do a getMore and return a timeout error. - name: iterateUntilDocumentOrError object: *changeStream @@ -290,7 +290,7 @@ tests: collection: *collectionName # The timeoutMS value should be refreshed for getMore's. This is a failure test. The createChangeStream operation - # sets timeoutMS=10 and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # sets timeoutMS=200 and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -303,12 +303,12 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 10 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream # The first iteration should do a getMore - name: iterateUntilDocumentOrError diff --git a/test/spec/client-side-operations-timeout/close-cursors.json b/test/spec/client-side-operations-timeout/close-cursors.json index 1361971c4ce..79b0de7b6aa 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.json +++ b/test/spec/client-side-operations-timeout/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/spec/client-side-operations-timeout/close-cursors.yml b/test/spec/client-side-operations-timeout/close-cursors.yml index db26e79ca31..c4c4ea0acda 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.yml +++ 
b/test/spec/client-side-operations-timeout/close-cursors.yml @@ -46,13 +46,13 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 50 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor # Iterate the cursor three times. The third should do a getMore, which should fail with a timeout error. - name: iterateUntilDocumentOrError @@ -99,18 +99,18 @@ tests: data: failCommands: ["killCursors"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor - name: close object: *cursor arguments: - timeoutMS: 40 + timeoutMS: 400 expectEvents: - client: *client events: @@ -120,7 +120,7 @@ tests: commandName: find - commandStartedEvent: command: - killCursors: *collection + killCursors: *collectionName maxTimeMS: { $$type: ["int", "long"] } commandName: killCursors - commandSucceededEvent: diff --git a/test/spec/client-side-operations-timeout/command-execution.json b/test/spec/client-side-operations-timeout/command-execution.json index b9b306c7fb6..aa9c3eb23f3 100644 --- a/test/spec/client-side-operations-timeout/command-execution.json +++ b/test/spec/client-side-operations-timeout/command-execution.json @@ -3,7 +3,7 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "topologies": [ "single", "replicaset", diff --git a/test/spec/client-side-operations-timeout/command-execution.yml b/test/spec/client-side-operations-timeout/command-execution.yml index 400a90867a3..6ba0585b3ca 100644 --- a/test/spec/client-side-operations-timeout/command-execution.yml +++ b/test/spec/client-side-operations-timeout/command-execution.yml @@ -3,9 +3,8 @@ description: "timeoutMS behaves correctly during command execution" schemaVersion: "1.9" runOnRequirements: - # The appName filter cannot be used to set a fail point on connection handshakes until server version 4.9 due to - # SERVER-49220/SERVER-49336. - - minServerVersion: "4.9" + # Require SERVER-49336 for failCommand + appName on the initial handshake. + - minServerVersion: "4.4.7" # Skip load-balanced and serverless which do not support RTT measurements. 
topologies: [ single, replicaset, sharded ] serverless: forbid diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.json b/test/spec/client-side-operations-timeout/convenient-transactions.json index 07e676d5f51..3868b3026c2 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.json +++ b/test/spec/client-side-operations-timeout/convenient-transactions.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.yml b/test/spec/client-side-operations-timeout/convenient-transactions.yml index d79aa4bd058..02d48b83242 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.yml +++ b/test/spec/client-side-operations-timeout/convenient-transactions.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -49,6 +49,8 @@ tests: timeoutMS: 100 expectError: isClientError: true + expectError: + isClientError: true expectEvents: # The only operation run fails with a client-side error, so there should be no events for the client. - client: *client @@ -66,7 +68,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 300 - name: withTransaction object: *session arguments: @@ -88,9 +90,6 @@ tests: expectEvents: - client: *client events: - # Because the second insert expects an error and gets an error, it technically succeeds, so withTransaction - # will try to run commitTransaction. This will fail client-side, though, because the timeout has already - # expired, so no command is sent. 
- commandStartedEvent: commandName: insert databaseName: *databaseName @@ -103,3 +102,9 @@ tests: command: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } diff --git a/test/spec/client-side-operations-timeout/deprecated-options.json b/test/spec/client-side-operations-timeout/deprecated-options.json index 322e9449101..d3e4631ff43 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.json +++ b/test/spec/client-side-operations-timeout/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/spec/client-side-operations-timeout/deprecated-options.yml b/test/spec/client-side-operations-timeout/deprecated-options.yml index 461ba6ab139..582a8983ae2 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.yml +++ b/test/spec/client-side-operations-timeout/deprecated-options.yml @@ -1,4 +1,4 @@ -description: "operations ignore deprected timeout options if timeoutMS is set" +description: "operations ignore deprecated timeout options if timeoutMS is set" schemaVersion: "1.9" diff --git a/test/spec/client-side-operations-timeout/gridfs-advanced.yml b/test/spec/client-side-operations-timeout/gridfs-advanced.yml index bc788bacc35..f6c37e165b2 100644 --- a/test/spec/client-side-operations-timeout/gridfs-advanced.yml +++ b/test/spec/client-side-operations-timeout/gridfs-advanced.yml @@ -119,7 +119,7 @@ tests: update: *filesCollectionName maxTimeMS: { $$type: ["int", "long"] } - # Tests for the "drop" opration. Any tests that might result in multiple commands being sent do not have expectEvents + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents # assertions as these assertions reduce test robustness and can cause flaky failures. 
- description: "timeoutMS can be overridden for drop" diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.json b/test/spec/client-side-operations-timeout/non-tailable-cursors.json index 0a5448a6bb2..291c6e72aa1 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.json +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml index 8cd953dec45..29037b4c0a3 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -53,7 +53,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -86,14 +86,14 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 - # Run a find with timeoutMS=20 and batchSize=1 to force two batches, which will cause a find and a getMore to be - # sent. Both will block for 15ms so together they will go over the timeout. + blockTimeMS: 125 + # Run a find with timeoutMS=200 and batchSize=1 to force two batches, which will cause a find and a getMore to be + # sent. Both will block for 125ms, so together they will go over the timeout. - name: find object: *collection arguments: filter: {} - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -127,13 +127,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: find object: *collection arguments: filter: {} timeoutMode: cursorLifetime - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -168,7 +168,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -187,8 +187,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. 
Neither - # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=20 and both - # "find" and "getMore" commands are blocked for 15ms each. Neither exceeds the timeout, so iteration succeeds. + # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=200 and both + # "find" and "getMore" commands are blocked for 125ms each. Neither exceeds the timeout, so iteration succeeds. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - success" operations: - name: failPoint @@ -201,13 +201,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 125 - name: find object: *collection arguments: filter: {} timeoutMode: iteration - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectEvents: - client: *client @@ -227,8 +227,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither - # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=10 and "getMore" - # commands are blocked for 15ms, causing iteration to fail with a timeout error. + # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=200 and "getMore" + # commands are blocked for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure" operations: - name: failPoint @@ -241,7 +241,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json index a28dbd26854..9daad260ef3 100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json @@ -108,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -198,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -327,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -419,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -546,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -634,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -760,6 +790,11 @@ }, { "description": "operation is retried multiple 
times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -851,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -982,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1075,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1203,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1291,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1417,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1508,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1639,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1732,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1868,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1964,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2095,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2183,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2303,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2390,6 +2495,11 @@ }, 
{ "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2512,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2600,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2730,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2825,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2955,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3043,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3166,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3254,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3377,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3465,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3588,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3676,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3799,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3887,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": 
"failPoint", @@ -4010,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4098,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4218,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4305,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4428,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4517,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4641,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4729,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4852,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4940,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5060,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5147,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5269,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5357,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml index 039f7ca42ef..6f47d6c2e42 
100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml @@ -84,6 +84,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -125,6 +127,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -191,6 +195,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -233,6 +239,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -299,6 +307,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -340,6 +350,8 @@ tests: delete: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -406,6 +418,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -448,6 +462,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -515,6 +531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -557,6 +575,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -623,6 +643,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner 
@@ -664,6 +686,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -730,6 +754,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -772,6 +798,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -839,6 +867,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -881,6 +911,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -949,6 +981,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -992,6 +1026,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1059,6 +1095,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1100,6 +1138,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1163,6 +1203,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1203,6 +1245,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1267,6 +1311,8 @@ tests: expectError: isTimeoutError: true - 
description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1308,6 +1354,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1373,6 +1421,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1414,6 +1464,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1479,6 +1531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1520,6 +1574,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1585,6 +1641,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1626,6 +1684,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1691,6 +1751,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1732,6 +1794,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1797,6 +1861,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1838,6 +1904,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1903,6 +1971,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1944,6 +2014,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2009,6 +2081,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2050,6 +2124,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2113,6 +2189,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2153,6 +2231,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2218,6 +2298,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2260,6 +2342,8 @@ tests: distinct: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2326,6 +2410,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2367,6 +2453,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2432,6 +2520,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: 
testRunner @@ -2473,6 +2563,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2536,6 +2628,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2576,6 +2670,8 @@ tests: listIndexes: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2640,6 +2736,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2681,6 +2779,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.json b/test/spec/client-side-operations-timeout/tailable-awaitData.json index 6da85c77835..535fb692434 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -130,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +188,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +199,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +272,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +283,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +354,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-awaitData.yml index 422c6fb5370..52b9b3b456c 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -83,7 +83,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 300 - name: find object: *collection arguments: @@ -117,13 +117,13 @@ tests: data: failCommands: ["find", "getMore"] 
blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate twice to force a getMore. The first iteration will return the document from the first batch and the @@ -165,13 +165,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 maxAwaitTimeMS: 1 saveResultAsEntity: &tailableCursor tailableCursor @@ -199,8 +199,8 @@ tests: collection: *collectionName maxTimeMS: 1 - # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from - # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=200 from + # the collection and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -213,7 +213,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json index 34ee6609636..e88230e4f7a 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml index 766b46e658b..eb75deaa65c 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -59,7 +59,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -96,13 +96,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailable - timeoutMS: 20 + timeoutMS: 200 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the @@ -131,7 +131,7 @@ tests: 
maxTimeMS: { $$exists: false } # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. The - # find inherits timeoutMS=10 from the collection and the getMore command blocks for 15ms, causing iteration to fail + # find inherits timeoutMS=200 from the collection and the getMore command blocks for 250ms, causing iteration to fail # with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: @@ -145,7 +145,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 5b5b7040698..31414fa4664 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -215,7 +215,8 @@ operations.set('close', async ({ entities, operation }) => { /* eslint-disable no-empty */ try { const cursor = entities.getEntity('cursor', operation.object); - await cursor.close(); + const timeoutMS = operation.arguments?.timeoutMS; + await cursor.close({ timeoutMS }); return; } catch {} @@ -787,7 +788,9 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc const { command, ...opts } = operation.arguments!; const cursor = db.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); diff --git a/test/unit/cursor/aggregation_cursor.test.ts b/test/unit/cursor/aggregation_cursor.test.ts index 32ca4125ff4..82ae18745b0 100644 --- a/test/unit/cursor/aggregation_cursor.test.ts +++ b/test/unit/cursor/aggregation_cursor.test.ts @@ -1,6 +1,12 @@ import { expect } from 'chai'; -import { type AggregationCursor, MongoClient } from '../../mongodb'; +import { + AggregationCursor, + CursorTimeoutMode, + MongoAPIError, + MongoClient, + ns +} from '../../mongodb'; describe('class AggregationCursor', () => { let client: MongoClient; @@ -126,6 +132,38 @@ describe('class AggregationCursor', () => { }); context('when addStage, bespoke stage methods, or array is used to construct pipeline', () => { + context('when CSOT is enabled', () => { + let aggregationCursor: AggregationCursor; + before(function () { + aggregationCursor = client + .db('test') + .collection('test') + .aggregate([], { timeoutMS: 100, timeoutMode: CursorTimeoutMode.ITERATION }); + }); + + context('when a $out stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $out: 'test' }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $merge stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $merge: {} }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $out stage is added with .out()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.out('test'); + }).to.throw(MongoAPIError); + }); + }); + }); + it('sets deeply identical aggregations pipelines', () => { const collection = client.db().collection('test'); @@ -157,4 +195,31 @@ describe('class AggregationCursor', () => { expect(builderGenericStageCursor.pipeline).to.deep.equal(expectedPipeline); }); }); + + 
describe('constructor()', () => { + context('when CSOT is enabled', () => { + context('when timeoutMode=ITERATION and a $out stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $out: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + context('when timeoutMode=ITERATION and a $merge stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $merge: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + }); + }); }); From 3045a34131549d75f02435d06a3ea0c17eaa77de Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 12 Sep 2024 15:24:39 -0400 Subject: [PATCH 040/136] fix(NODE-6374): MongoOperationTimeoutError inherits MongoRuntimeError (#4237) --- etc/notes/errors.md | 6 +++++- src/error.ts | 21 ++++++++++++++++++--- test/unit/error.test.ts | 20 ++++++++++++++++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/etc/notes/errors.md b/etc/notes/errors.md index d0f8e6b6e95..114bc1b2e2c 100644 --- a/etc/notes/errors.md +++ b/etc/notes/errors.md @@ -67,7 +67,7 @@ Children of `MongoError` include: ### `MongoDriverError` This class represents errors which originate in the driver itself or when the user incorrectly uses the driver. This class should **never** be directly instantiated. -Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError) and [**`MongoRuntimeError`**](#MongoRuntimeError). +Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError), [**`MongoRuntimeError`**](#MongoRuntimeError) and [**`MongoOperationTimeoutError`**](#MongoOperationTimeoutError). ### `MongoAPIError` @@ -109,6 +109,10 @@ This class should **never** be directly instantiated. | **MongoGridFSChunkError** | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. | | **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. | +### `MongoOperationTimeoutError` + +- TODO(NODE-5688): Add MongoOperationTimeoutError documentation + ### MongoUnexpectedServerResponseError Intended for the scenario where the MongoDB returns an unexpected response in relation to some state the driver is in. diff --git a/src/error.ts b/src/error.ts index c99083a937a..fc6d1393ec8 100644 --- a/src/error.ts +++ b/src/error.ts @@ -310,7 +310,7 @@ export class MongoAPIError extends MongoDriverError { /** * An error generated when the driver encounters unexpected input - * or reaches an unexpected/invalid internal state + * or reaches an unexpected/invalid internal state. * * @privateRemarks * Should **never** be directly instantiated. @@ -819,9 +819,24 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } /** - * @internal + * @public + * @category Error + * + * This error is thrown when an operation could not be completed within the specified `timeoutMS`. + * TODO(NODE-5688): expand this documentation. + * + * @example + * ```ts + * try { + * await blogs.insertOne(blogPost, { timeoutMS: 60_000 }) + * } catch (error) { + * if (error instanceof MongoOperationTimeoutError) { + * console.log(`Oh no! 
writer's block!`, error); + * } + * } + * ``` */ -export class MongoOperationTimeoutError extends MongoRuntimeError { +export class MongoOperationTimeoutError extends MongoDriverError { override get name(): string { return 'MongoOperationTimeoutError'; } diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index bdc049cbc4f..dca792bd382 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -14,12 +14,15 @@ import { LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE, LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE, MONGODB_ERROR_CODES, + MongoDriverError, MongoError, MongoErrorLabel, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, + MongoRuntimeError, MongoServerError, MongoSystemError, MongoWriteConcernError, @@ -173,6 +176,23 @@ describe('MongoErrors', () => { }); }); + describe('class MongoOperationTimeoutError', () => { + it('has a name property equal to MongoOperationTimeoutError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.have.property('name', 'MongoOperationTimeoutError'); + }); + + it('is instanceof MongoDriverError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.be.instanceOf(MongoDriverError); + }); + + it('is not instanceof MongoRuntimeError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.not.be.instanceOf(MongoRuntimeError); + }); + }); + describe('MongoMissingDependencyError#constructor', () => { context('when options.cause is set', () => { it('attaches the cause property to the instance', () => { From 11d059f04234f6951613159130b82d4b163dbb03 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 16:02:50 -0400 Subject: [PATCH 041/136] test: remove empty skipped context blocks (#4238) --- .../client-side-operations-timeout/node_csot.test.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index f5ada7eef9f..56127cc8ace 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -576,11 +576,6 @@ describe('CSOT driver tests', metadata, () => { }); }); - describe.skip('Tailable non-awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe.skip('Tailable awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } From bfeeda9ced837c36f39fb650155cfa849076f3a0 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Tue, 17 Sep 2024 13:27:43 -0400 Subject: [PATCH 042/136] feat(NODE-5844): add iscryptd to ServerDescription (#4239) --- src/sdam/server_description.ts | 4 ++ .../server_description.test.ts | 56 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 test/integration/server-discovery-and-monitoring/server_description.test.ts diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index aadf523d722..a650c8dba97 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -75,6 +75,8 @@ export class ServerDescription { maxWriteBatchSize: number | null; /** The max bson object size. 
*/ maxBsonObjectSize: number | null; + /** Indicates server is a mongocryptd instance. */ + iscryptd: boolean; // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level $clusterTime?: ClusterTime; @@ -123,6 +125,7 @@ export class ServerDescription { this.primary = hello?.primary ?? null; this.me = hello?.me?.toLowerCase() ?? null; this.$clusterTime = hello?.$clusterTime ?? null; + this.iscryptd = Boolean(hello?.iscryptd); } get hostAddress(): HostAddress { @@ -176,6 +179,7 @@ export class ServerDescription { return ( other != null && + other.iscryptd === this.iscryptd && errorStrictEqual(this.error, other.error) && this.type === other.type && this.minWireVersion === other.minWireVersion && diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts new file mode 100644 index 00000000000..0a3c7eecbf6 --- /dev/null +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -0,0 +1,56 @@ +import { type ChildProcess, spawn } from 'node:child_process'; + +import { expect } from 'chai'; + +import { MongoClient } from '../../mongodb'; + +describe('class ServerDescription', function () { + describe('when connecting to mongocryptd', { requires: { mongodb: '>=4.4' } }, function () { + let client: MongoClient; + const mongocryptdTestPort = '27022'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}`); + }); + + afterEach(async function () { + await client?.close(); + childProcess.kill('SIGKILL'); + }); + + it('iscryptd is set to true ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.have.property('iscryptd', true); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', true); + }); + }); + + describe('when connecting to anything other than mongocryptd', function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient(); + }); + + afterEach(async function () { + await client?.close(); + }); + + it('iscryptd is set to false ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.not.have.property('iscryptd'); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', false); + }); + }); +}); From 7a12914ba6c1d5700077b9e2068d41dbe3177439 Mon Sep 17 00:00:00 2001 From: Warren James Date: Wed, 25 Sep 2024 17:43:12 -0400 Subject: [PATCH 043/136] chore: allow clientBulkWrite to use TimeoutContext (#4251) --- .../client_bulk_write/client_bulk_write.ts | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/operations/client_bulk_write/client_bulk_write.ts b/src/operations/client_bulk_write/client_bulk_write.ts index b04c978114d..82d45793ac4 100644 --- a/src/operations/client_bulk_write/client_bulk_write.ts +++ b/src/operations/client_bulk_write/client_bulk_write.ts @@ -2,6 +2,7 @@ import { 
MongoClientBulkWriteExecutionError, ServerType } from '../../beta'; import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { MongoDBNamespace } from '../../utils'; import { CommandOperation } from '../command'; import { Aspect, defineAspects } from '../operation'; @@ -35,14 +36,15 @@ export class ClientBulkWriteOperation extends CommandOperation { let command; if (server.description.type === ServerType.LoadBalancer) { if (session) { // Checkout a connection to build the command. - const connection = await server.pool.checkOut(); + const connection = await server.pool.checkOut({ timeoutContext }); // Pin the connection to the session so it get used to execute the command and we do not // perform a double check-in/check-out. session.pin(connection); @@ -69,7 +71,13 @@ export class ClientBulkWriteOperation extends CommandOperation Date: Tue, 1 Oct 2024 17:34:20 -0400 Subject: [PATCH 044/136] requested changes --- src/client-side-encryption/state_machine.ts | 56 +++---- ...lient_side_operations_timeout.unit.test.ts | 57 +++++-- .../state_machine.test.ts | 139 +++++++++++++++++- 3 files changed, 204 insertions(+), 48 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index ce7bdc483bb..eb0e759ece8 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -14,7 +14,7 @@ import { type ProxyOptions } from '../cmap/connection'; import { getSocks, type SocksLib } from '../deps'; import { MongoOperationTimeoutError } from '../error'; import { type MongoClient, type MongoClientOptions } from '../mongo_client'; -import { type CSOTTimeoutContext, Timeout, TimeoutError } from '../timeout'; +import { Timeout, type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, MongoDBCollectionNamespace, promiseWithResolvers } from '../utils'; import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; @@ -187,7 +187,7 @@ export class StateMachine { async execute( executor: StateMachineExecutable, context: MongoCryptContext, - timeoutContext?: CSOTTimeoutContext + timeoutContext?: TimeoutContext ): Promise { const keyVaultNamespace = executor._keyVaultNamespace; const keyVaultClient = executor._keyVaultClient; @@ -212,7 +212,7 @@ export class StateMachine { metaDataClient, context.ns, filter, - timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null + timeoutContext ); if (collInfo) { context.addMongoOperationResponse(collInfo); @@ -233,20 +233,9 @@ export class StateMachine { // When we are using the shared library, we don't have a mongocryptd manager. const markedCommand: Uint8Array = mongocryptdManager ? await mongocryptdManager.withRespawn( - this.markCommand.bind( - this, - mongocryptdClient, - context.ns, - command, - timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null - ) + this.markCommand.bind(this, mongocryptdClient, context.ns, command, timeoutContext) ) - : await this.markCommand( - mongocryptdClient, - context.ns, - command, - timeoutContext?.csotEnabled() ? 
timeoutContext.remainingTimeMS : null - ); + : await this.markCommand(mongocryptdClient, context.ns, command, timeoutContext); context.addMongoOperationResponse(markedCommand); context.finishMongoOperation(); @@ -259,7 +248,7 @@ export class StateMachine { keyVaultClient, keyVaultNamespace, filter, - timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null + timeoutContext ); if (keys.length === 0) { @@ -282,12 +271,7 @@ export class StateMachine { } case MONGOCRYPT_CTX_NEED_KMS: { - await Promise.all( - this.requests( - context, - timeoutContext?.csotEnabled() ? timeoutContext.remainingTimeMS : null - ) - ); + await Promise.all(this.requests(context, timeoutContext)); context.finishKMSRequests(); break; } @@ -329,7 +313,7 @@ export class StateMachine { * @param kmsContext - A C++ KMS context returned from the bindings * @returns A promise that resolves when the KMS reply has be fully parsed */ - async kmsRequest(request: MongoCryptKMSRequest, timeoutMS?: number | null): Promise { + async kmsRequest(request: MongoCryptKMSRequest, timeoutContext?: TimeoutContext): Promise { const parsedUrl = request.endpoint.split(':'); const port = parsedUrl[1] != null ? Number.parseInt(parsedUrl[1], 10) : HTTPS_PORT; const socketOptions = autoSelectSocketOptions(this.options.socketOptions || {}); @@ -451,8 +435,8 @@ export class StateMachine { } }); - await (typeof timeoutMS === 'number' - ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutMS)]) + await (timeoutContext?.csotEnabled() + ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) : willResolveKmsRequest); } catch (error) { if (error instanceof TimeoutError) @@ -464,13 +448,13 @@ export class StateMachine { } } - *requests(context: MongoCryptContext, timeoutMS?: number | null) { + *requests(context: MongoCryptContext, timeoutContext?: TimeoutContext) { for ( let request = context.nextKMSRequest(); request != null; request = context.nextKMSRequest() ) { - yield this.kmsRequest(request, timeoutMS); + yield this.kmsRequest(request, timeoutContext); } } @@ -531,7 +515,7 @@ export class StateMachine { client: MongoClient, ns: string, filter: Document, - timeoutMS?: number | null + timeoutContext?: TimeoutContext ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); @@ -540,9 +524,7 @@ export class StateMachine { .listCollections(filter, { promoteLongs: false, promoteValues: false, - ...(typeof timeoutMS === 'number' - ? { timeoutMS, timeoutMode: 'cursorLifetime' } - : undefined) + timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined }) .toArray(); @@ -562,7 +544,7 @@ export class StateMachine { client: MongoClient, ns: string, command: Uint8Array, - timeoutMS?: number | null + timeoutContext?: TimeoutContext ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); const bsonOptions = { promoteLongs: false, promoteValues: false }; @@ -570,7 +552,7 @@ export class StateMachine { const response = await client.db(db).command(rawCommand, { ...bsonOptions, - ...(typeof timeoutMS === 'number' ? { timeoutMS, omitMaxTimeMS: true } : undefined) + timeoutMS: timeoutContext?.csotEnabled() ? 
timeoutContext?.remainingTimeMS : undefined }); return serialize(response, this.bsonOptions); @@ -588,7 +570,7 @@ export class StateMachine { client: MongoClient, keyVaultNamespace: string, filter: Uint8Array, - timeoutMS?: number | null + timeoutContext?: TimeoutContext ): Promise> { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); @@ -596,7 +578,9 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter), { timeoutMS: timeoutMS != null ? timeoutMS : undefined }) + .find(deserialize(filter, { allowObjectSmallerThanBufferSize: true }), { + timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined + }) .toArray(); } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 454f470683f..d2bcf9812b0 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -12,6 +12,7 @@ import { TLSSocket } from 'tls'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { ConnectionPool, + CSOTTimeoutContext, type MongoClient, MongoOperationTimeoutError, Timeout, @@ -104,8 +105,8 @@ describe('CSOT spec unit tests', function () { }).skipReason = 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; - context('Client side encryption', function () { - it('The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', async function () { + describe('Client side encryption', function () { + describe('KMS requests', function () { const stateMachine = new StateMachine({} as any); const request = { addResponse: _response => {}, @@ -120,14 +121,52 @@ describe('CSOT spec unit tests', function () { message: Buffer.from('foobar') }; - const timeoutMS = 100; - sinon.stub(TLSSocket.prototype, 'connect').callsFake(async function (..._args) { - await sleep(200); - return {} as TLSSocket; + context('when StateMachine.kmsRequest() is passed a `CSOTimeoutContext`', function () { + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(async function (..._args) { + await sleep(200); + return {} as TLSSocket; + }); + }); + + afterEach(async function () { + sinon.restore(); + }); + + it('the kms request times out through remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + sleep(300); + const err = await stateMachine.kmsRequest(request, timeoutContext).catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(err.errmsg).to.equal('KMS request timed out'); + }); + }); + + context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(async function (..._args) { + return {} as TLSSocket; + }); + }); + + afterEach(async function () { + sinon.restore(); + }); + + it('the kms request does not timeout within 30 seconds', async function () { + const sleepingFn = async () => { + await sleep(55000); + throw Error('Slept for 30s'); + }; + const err = await 
Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch( + e => e + ); + expect(err.message).to.equal('Slept for 30s'); + }); }); - const err = await stateMachine.kmsRequest(request, timeoutMS).catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - expect(err.errmsg).to.equal('KMS request timed out'); }); // TODO(NODE-6390): Add timeoutMS support to Auto Encryption diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index 77f3cf3a824..a289cd58b37 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -12,9 +12,17 @@ import * as tls from 'tls'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { Db } from '../../../src/db'; -// eslint-disable-next-line @typescript-eslint/no-restricted-imports -import { MongoClient } from '../../../src/mongo_client'; -import { Int32, Long, serialize } from '../../mongodb'; +import { + BSON, + Collection, + CSOTTimeoutContext, + Int32, + Long, + MongoClient, + serialize, + squashError +} from '../../mongodb'; +import { sleep } from '../../tools/utils'; describe('StateMachine', function () { class MockRequest implements MongoCryptKMSRequest { @@ -461,4 +469,129 @@ describe('StateMachine', function () { expect.fail('missed exception'); }); }); + + describe('CSOT', function () { + describe('#fetchKeys', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let findSpy; + + beforeEach(async function () { + findSpy = sinon.spy(Collection.prototype, 'find'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.fetchKeys() is passed a `CSOTimeoutContext`', function () { + it('collection.find runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(200); + }); + }); + + context('when StateMachine.fetchKeys() is not passed a `CSOTimeoutContext`', function () { + it('collection.find runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#markCommand', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let dbCommandSpy; + + beforeEach(async function () { + dbCommandSpy = sinon.spy(Db.prototype, 'command'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.markCommand() is passed a `CSOTimeoutContext`', function () { + it('db.command runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + 
.markCommand(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(200); + }); + }); + + context('when StateMachine.markCommand() is not passed a `CSOTimeoutContext`', function () { + it('db.command runs with an undefined timeoutMS property', async function () { + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#fetchCollectionInfo', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let listCollectionsSpy; + + beforeEach(async function () { + listCollectionsSpy = sinon.spy(Db.prototype, 'listCollections'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context( + 'when StateMachine.fetchCollectionInfo() is passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(200); + }); + } + ); + + context( + 'when StateMachine.fetchCollectionInfo() is not passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + } + ); + }); + }); }); From 702a03eeb59174ea352f2d64546f32e0cc4c6e84 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 1 Oct 2024 17:43:58 -0400 Subject: [PATCH 045/136] lint fix --- ...ient_side_operations_timeout.prose.test.ts | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 8ed1a7dc93b..0a20a5b15dc 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -109,7 +109,6 @@ describe('CSOT spec prose tests', function () { ); // TODO(NODE-6391): Add timeoutMS support to Explicit Encryption - context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, @@ -570,24 +569,6 @@ describe('CSOT spec prose tests', function () { }).skipReason = 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { - /** - * 1. 
Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. - */ - client = new MongoClient('mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10'); - const start = now(); - const maybeError = await client - .db('test') - .admin() - .ping() - .then( - () => null, - e => e - ); - const end = now(); - it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. From 3b6a23b27cef9d2cb0da00ecd7f1d0f345262880 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 1 Oct 2024 18:31:06 -0400 Subject: [PATCH 046/136] test fix --- src/client-side-encryption/state_machine.ts | 7 +++++-- test/unit/client-side-encryption/state_machine.test.ts | 4 +--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index eb0e759ece8..994ecf91a96 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -175,6 +175,7 @@ export type StateMachineOptions = { * An internal class that executes across a MongoCryptContext until either * a finishing state or an error is reached. Do not instantiate directly. */ +// TODO(DRIVERS-2671): clarify CSOT behavior for FLE APIs export class StateMachine { constructor( private options: StateMachineOptions, @@ -552,7 +553,9 @@ export class StateMachine { const response = await client.db(db).command(rawCommand, { ...bsonOptions, - timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined + ...(timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS } + : undefined) }); return serialize(response, this.bsonOptions); @@ -578,7 +581,7 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter, { allowObjectSmallerThanBufferSize: true }), { + .find(deserialize(filter), { timeoutMS: timeoutContext?.csotEnabled() ? 
timeoutContext?.remainingTimeMS : undefined }) .toArray(); diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index a289cd58b37..fe9659675a4 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -82,12 +82,10 @@ describe('StateMachine', function () { const options = { promoteLongs: false, promoteValues: false }; const serializedCommand = serialize(command); const stateMachine = new StateMachine({} as any); - // eslint-disable-next-line @typescript-eslint/no-empty-function - const callback = () => {}; context('when executing the command', function () { it('does not promote values', function () { - stateMachine.markCommand(clientStub, 'test.coll', serializedCommand, callback); + stateMachine.markCommand(clientStub, 'test.coll', serializedCommand); expect(runCommandStub.calledWith(command, options)).to.be.true; }); }); From 5560a1bb60262d85e7ad4f8f8925581f06c3c045 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Wed, 2 Oct 2024 13:42:39 -0400 Subject: [PATCH 047/136] no negative timeouts --- src/client-side-encryption/state_machine.ts | 17 +++++++++++++++++ src/sdam/server_description.ts | 1 - 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 994ecf91a96..656a8c0991e 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -436,6 +436,9 @@ export class StateMachine { } }); + if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { + throw new MongoOperationTimeoutError('Timed out before KMS request.'); + } await (timeoutContext?.csotEnabled() ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) : willResolveKmsRequest); @@ -520,6 +523,12 @@ export class StateMachine { ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); + if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { + throw new MongoOperationTimeoutError( + 'Timed out before call to mongocryptd listCollections operation.' + ); + } + const collections = await client .db(db) .listCollections(filter, { @@ -551,6 +560,11 @@ export class StateMachine { const bsonOptions = { promoteLongs: false, promoteValues: false }; const rawCommand = deserialize(command, bsonOptions); + if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { + throw new MongoOperationTimeoutError( + 'Timed out before call to mongocryptd markings request.' + ); + } const response = await client.db(db).command(rawCommand, { ...bsonOptions, ...(timeoutContext?.csotEnabled() @@ -578,6 +592,9 @@ export class StateMachine { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); + if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { + throw new MongoOperationTimeoutError('Timed out before dataKey fetched.'); + } return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index 5429843c267..a650c8dba97 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -75,7 +75,6 @@ export class ServerDescription { maxWriteBatchSize: number | null; /** The max bson object size. 
*/ maxBsonObjectSize: number | null; - /** Indicates server is a mongocryptd instance. */ iscryptd: boolean; From be96466a4f33fb3f1a9869cab686bc4085c50356 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Wed, 2 Oct 2024 17:29:28 -0400 Subject: [PATCH 048/136] feat(NODE-6390): Add timeoutMS support to auto encryption --- src/client-side-encryption/auto_encrypter.ts | 20 ++++-- ...lient_side_operations_timeout.unit.test.ts | 37 +++++++++- .../auto_encrypter.test.ts | 71 ++++++++++++++++++- 3 files changed, 120 insertions(+), 8 deletions(-) diff --git a/src/client-side-encryption/auto_encrypter.ts b/src/client-side-encryption/auto_encrypter.ts index 5ac3945f5e4..c655e82d6b6 100644 --- a/src/client-side-encryption/auto_encrypter.ts +++ b/src/client-side-encryption/auto_encrypter.ts @@ -395,10 +395,16 @@ export class AutoEncrypter { socketOptions: autoSelectSocketOptions(this._client.options) }); - return deserialize(await stateMachine.execute(this, context), { - promoteValues: false, - promoteLongs: false - }); + return deserialize( + await stateMachine.execute( + this, + context, + options.timeoutContext?.csotEnabled() ? options.timeoutContext : undefined), + { + promoteValues: false, + promoteLongs: false + } + ); } /** @@ -416,7 +422,11 @@ export class AutoEncrypter { socketOptions: autoSelectSocketOptions(this._client.options) }); - return await stateMachine.execute(this, context); + return await stateMachine.execute( + this, + context, + options.timeoutContext?.csotEnabled() ? options.timeoutContext : undefined + ); } /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index d2bcf9812b0..77e8cf9376f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -11,6 +11,7 @@ import { TLSSocket } from 'tls'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { + BSON, ConnectionPool, CSOTTimeoutContext, type MongoClient, @@ -19,6 +20,8 @@ import { TimeoutContext, Topology } from '../../mongodb'; +/* eslint-disable @typescript-eslint/no-restricted-imports */ +import { AutoEncrypter } from '../../../src/client-side-encryption/auto_encrypter'; import { sleep } from '../../tools/utils'; // TODO(NODE-5824): Implement CSOT prose tests @@ -169,8 +172,40 @@ describe('CSOT spec unit tests', function () { }); }); + describe('CryptoConnection', function () { + let autoEncrypter; + beforeEach(async function () { + autoEncrypter = new AutoEncrypter(client, { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + }); + await autoEncrypter.init(); + }); + + afterEach(async function () { + + }); + describe('#command', function () { + context('when encrypt is provided a timeoutContext', function () { + it('should respect remainingTimeMS', function () { + + }); + }); + context('when encrypt is not provided a timeoutContext', function () { + it('should not timeout within 30 seconds', function () { + + }); + }); + }); + }); + // TODO(NODE-6390): Add timeoutMS support to Auto Encryption - it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => 
{}); + it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => { + + }); }); context.skip('Background Connection Pooling', function () { diff --git a/test/unit/client-side-encryption/auto_encrypter.test.ts b/test/unit/client-side-encryption/auto_encrypter.test.ts index 1e13c0b07c5..41d41f7e89c 100644 --- a/test/unit/client-side-encryption/auto_encrypter.test.ts +++ b/test/unit/client-side-encryption/auto_encrypter.test.ts @@ -11,8 +11,9 @@ import { MongocryptdManager } from '../../../src/client-side-encryption/mongocry import { StateMachine } from '../../../src/client-side-encryption/state_machine'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { MongoClient } from '../../../src/mongo_client'; -import { BSON, type DataKey } from '../../mongodb'; +import { BSON, CSOTTimeoutContext, type DataKey } from '../../mongodb'; import * as requirements from './requirements.helper'; +import { sleep } from '../../tools/utils'; const bson = BSON; const { EJSON } = BSON; @@ -38,7 +39,7 @@ const MOCK_MONGOCRYPTD_RESPONSE = readExtendedJsonToBuffer( const MOCK_KEYDOCUMENT_RESPONSE = readExtendedJsonToBuffer(`${__dirname}/data/key-document.json`); const MOCK_KMS_DECRYPT_REPLY = readHttpResponse(`${__dirname}/data/kms-decrypt-reply.txt`); -class MockClient { +export class MockClient { options: any; constructor(options?: any) { @@ -374,4 +375,70 @@ describe('AutoEncrypter', function () { it('should provide the libmongocrypt version', function () { expect(AutoEncrypter.libmongocryptVersion).to.be.a('string'); }); + + describe.only('CSOT', function () { + let autoEncrypter: AutoEncrypter; + let stateMachineSpy; + let client; + + beforeEach(async function () { + client = new MockClient() as MongoClient; + autoEncrypter = new AutoEncrypter(client, { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + }); + await autoEncrypter.init(); + stateMachineSpy = sinon.spy(StateMachine.prototype, 'execute'); + }); + + afterEach(async function () { + sinon.restore(); + }); + + describe('#encrypt', function () { + context('when encrypt is provided a timeoutContext', async function () { + it('should call stateMachine.execute with a timeoutMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await autoEncrypter.encrypt('test.test', { hello: 1 }, { timeoutContext }); + expect(stateMachineSpy.getCalls()[0].args[2]).to.not.be.undefined; + expect(stateMachineSpy.getCalls()[0].args[2].remainingTimeMS).to.be.lessThanOrEqual(200); + }); + }); + context('when encrypt is not provided a timeoutContext', function () { + it('should call stateMachine.execute without a timeoutMS', async function () { + await autoEncrypter.encrypt('test.test', { hello: 1 }); + expect(stateMachineSpy.getCalls()[0].args[2]).to.be.undefined; + }); + }); + }); + + describe('#decrypt', function () { + context('when decrypt is provided a timeoutContext', function () { + it('should respect remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await autoEncrypter.decrypt(BSON.serialize({ ok: 1 }), { timeoutContext }); + expect(stateMachineSpy.getCalls()[0].args[2]).to.not.be.undefined; + 
expect(stateMachineSpy.getCalls()[0].args[2].remainingTimeMS).to.be.lessThanOrEqual(200); + }); + }); + + context('when decrypt is not provided a timeoutContext', function () { + it('should call stateMachine.execute without a timeoutMS', async function () { + await autoEncrypter.decrypt(BSON.serialize({ ok: 1 })); + expect(stateMachineSpy.getCalls()[0].args[2]).to.be.undefined; + }); + }); + }); + }); }); From 09f6d7d9ae6b4bc9b454e5ada2873d316a12f632 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Wed, 2 Oct 2024 17:31:59 -0400 Subject: [PATCH 049/136] feat(NODE-6274): add CSOT support to bulkWrite (#4250) Co-authored-by: Bailey Pearson --- src/bulk/common.ts | 18 ++- ...ient_side_operations_timeout.prose.test.ts | 142 +++++++++++++++++- ...lient_side_operations_timeout.spec.test.ts | 7 - test/tools/unified-spec-runner/match.ts | 14 +- 4 files changed, 159 insertions(+), 22 deletions(-) diff --git a/src/bulk/common.ts b/src/bulk/common.ts index 9eb63382443..a8cec4ba67b 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -503,7 +503,7 @@ export function mergeBatchResults( function executeCommands( bulkOperation: BulkOperationBase, - options: BulkWriteOptions, + options: BulkWriteOptions & { timeoutContext?: TimeoutContext | null }, callback: Callback ) { if (bulkOperation.s.batches.length === 0) { @@ -590,7 +590,11 @@ function executeCommands( : null; if (operation != null) { - executeOperation(bulkOperation.s.collection.client, operation).then( + executeOperation( + bulkOperation.s.collection.client, + operation, + finalOptions.timeoutContext + ).then( result => resultHandler(undefined, result), error => resultHandler(error) ); @@ -899,7 +903,11 @@ export class BulkWriteShimOperation extends AbstractOperation { return 'bulkWrite' as const; } - execute(_server: Server, session: ClientSession | undefined): Promise { + async execute( + _server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (this.options.session == null) { // An implicit session could have been created by 'executeOperation' // So if we stick it on finalOptions here, each bulk operation @@ -907,7 +915,7 @@ export class BulkWriteShimOperation extends AbstractOperation { // an explicit session would be this.options.session = session; } - return executeCommandsAsync(this.bulkOperation, this.options); + return await executeCommandsAsync(this.bulkOperation, { ...this.options, timeoutContext }); } } @@ -1236,7 +1244,7 @@ export abstract class BulkOperationBase { const finalOptions = { ...this.s.options, ...options }; const operation = new BulkWriteShimOperation(this, finalOptions); - return await executeOperation(this.s.collection.client, operation); + return await executeOperation(this.s.collection.client, operation, finalOptions.timeoutContext); } /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 0d36998fd96..e276c9bbafd 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -7,6 +7,7 @@ import * as sinon from 'sinon'; import { type CommandStartedEvent } from '../../../mongodb'; import { type CommandSucceededEvent, + MongoBulkWriteError, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -28,7 +29,7 @@ describe('CSOT spec prose tests', function 
() { await client?.close(); }); - context.skip('1. Multi-batch writes', () => { + describe('1. Multi-batch writes', { requires: { topology: 'single', mongodb: '>=4.4' } }, () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -55,6 +56,46 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. */ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it('performs two inserts which fail to complete before 2000 ms', async () => { + const inserts = []; + client.on('commandStarted', ev => inserts.push(ev)); + + const a = new Uint8Array(1000000 - 22); + const oneMBDocs = Array.from({ length: 50 }, (_, _id) => ({ _id, a })); + const error = await client + .db('db') + .collection<{ _id: number; a: Uint8Array }>('coll') + .insertMany(oneMBDocs) + .catch(error => error); + + expect(error).to.be.instanceOf(MongoBulkWriteError); + expect(error.errorResponse).to.be.instanceOf(MongoOperationTimeoutError); + expect(inserts.map(ev => ev.commandName)).to.deep.equal(['insert', 'insert']); + }); }); context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { @@ -901,4 +942,103 @@ describe('CSOT spec prose tests', function () { }); }); }); + + describe.skip( + '11. Multi-batch bulkWrites', + { requires: { mongodb: '>=8.0', serverless: 'forbid' } }, + function () { + /** + * ### 11. Multi-batch bulkWrites + * + * This test MUST only run against server versions 8.0+. This test must be skipped on Atlas Serverless. + * + * 1. Using `internalClient`, drop the `db.coll` collection. + * + * 2. Using `internalClient`, set the following fail point: + * + * @example + * ```javascript + * { + * configureFailPoint: "failCommand", + * mode: { + * times: 2 + * }, + * data: { + * failCommands: ["bulkWrite"], + * blockConnection: true, + * blockTimeMS: 1010 + * } + * } + * ``` + * + * 3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + * in the response. + * + * 4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + * + * 5. Create a list of write models (referred to as `models`) with the following write model repeated + * (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + * + * @example + * ```json + * InsertOne { + * "namespace": "db.coll", + * "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + * } + * ``` + * + * 6. Call `bulkWrite` on `client` with `models`. + * + * - Expect this to fail with a timeout error. + * + * 7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. 
+ */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['bulkWrite'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + let maxBsonObjectSize: number; + let maxMessageSizeBytes: number; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + const hello = await internalClient.db('admin').command({ hello: 1 }); + maxBsonObjectSize = hello.maxBsonObjectSize; + maxMessageSizeBytes = hello.maxMessageSizeBytes; + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it.skip('performs two bulkWrites which fail to complete before 2000 ms', async function () { + const writes = []; + client.on('commandStarted', ev => writes.push(ev)); + + const length = maxMessageSizeBytes / maxBsonObjectSize + 1; + const models = Array.from({ length }, () => ({ + namespace: 'db.coll', + name: 'insertOne' as const, + document: { a: 'b'.repeat(maxBsonObjectSize - 500) } + })); + + const error = await client.bulkWrite(models).catch(error => error); + + expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError); + expect(writes.map(ev => ev.commandName)).to.deep.equal(['bulkWrite', 'bulkWrite']); + }).skipReason = 'TODO(NODE-6403): client.bulkWrite is implemented in a follow up'; + } + ); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 99914fa08e7..c2e08cfc80a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -5,7 +5,6 @@ import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const skippedSpecs = { - bulkWrite: 'TODO(NODE-6274)', 'change-streams': 'TODO(NODE-6035)', 'convenient-transactions': 'TODO(NODE-5687)', 'deprecated-options': 'TODO(NODE-5689)', @@ -19,18 +18,12 @@ const skippedSpecs = { }; const skippedTests = { - 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', - 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': 'TODO(NODE-6305)', 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': - 'TODO(NODE-6274)', - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': - 'TODO(NODE-6274)', 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': 'TODO(DRIVERS-2965)', diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 35c274dfbe0..5605e758829 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -787,15 +787,11 @@ export 
function expectErrorCheck( if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { - expect(error).to.be.instanceof(MongoOperationTimeoutError); - } - - // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their - // errorResponse field - if (expected.isTimeoutError === false) { - expect(error).to.not.be.instanceof(MongoOperationTimeoutError); - } else if (expected.isTimeoutError === true) { - expect(error).to.be.instanceof(MongoOperationTimeoutError); + if ('errorResponse' in error) { + expect(error.errorResponse).to.be.instanceof(MongoOperationTimeoutError); + } else { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } } if (expected.errorContains != null) { From 601c159acffcdb08916842df5127aec07b3e18d7 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Wed, 2 Oct 2024 17:59:43 -0400 Subject: [PATCH 050/136] requested changes --- src/client-side-encryption/state_machine.ts | 20 ++++--------------- ...lient_side_operations_timeout.unit.test.ts | 2 +- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 656a8c0991e..ae00080edde 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -523,18 +523,13 @@ export class StateMachine { ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError( - 'Timed out before call to mongocryptd listCollections operation.' - ); - } - const collections = await client .db(db) .listCollections(filter, { promoteLongs: false, promoteValues: false, - timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined + timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined, + timeoutMode: 'cursorLifetime' }) .toArray(); @@ -560,11 +555,6 @@ export class StateMachine { const bsonOptions = { promoteLongs: false, promoteValues: false }; const rawCommand = deserialize(command, bsonOptions); - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError( - 'Timed out before call to mongocryptd markings request.' - ); - } const response = await client.db(db).command(rawCommand, { ...bsonOptions, ...(timeoutContext?.csotEnabled() @@ -592,14 +582,12 @@ export class StateMachine { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError('Timed out before dataKey fetched.'); - } return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) .find(deserialize(filter), { - timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined + timeoutMS: timeoutContext?.csotEnabled() ? 
timeoutContext?.remainingTimeMS : undefined, + timeoutMode: 'cursorLifetime' }) .toArray(); } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index d2bcf9812b0..329a3b4cb78 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -158,7 +158,7 @@ describe('CSOT spec unit tests', function () { it('the kms request does not timeout within 30 seconds', async function () { const sleepingFn = async () => { - await sleep(55000); + await sleep(30000); throw Error('Slept for 30s'); }; const err = await Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch( From 643e3a7f648eb84d7f8f0ca19f334d8ecd7a3574 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 3 Oct 2024 00:28:40 -0400 Subject: [PATCH 051/136] testing --- src/client-side-encryption/state_machine.ts | 17 ---- ...lient_side_operations_timeout.unit.test.ts | 81 ++++++++++++++----- .../auto_encrypter.test.ts | 2 +- 3 files changed, 64 insertions(+), 36 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 656a8c0991e..994ecf91a96 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -436,9 +436,6 @@ export class StateMachine { } }); - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError('Timed out before KMS request.'); - } await (timeoutContext?.csotEnabled() ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) : willResolveKmsRequest); @@ -523,12 +520,6 @@ export class StateMachine { ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError( - 'Timed out before call to mongocryptd listCollections operation.' - ); - } - const collections = await client .db(db) .listCollections(filter, { @@ -560,11 +551,6 @@ export class StateMachine { const bsonOptions = { promoteLongs: false, promoteValues: false }; const rawCommand = deserialize(command, bsonOptions); - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError( - 'Timed out before call to mongocryptd markings request.' 
- ); - } const response = await client.db(db).command(rawCommand, { ...bsonOptions, ...(timeoutContext?.csotEnabled() @@ -592,9 +578,6 @@ export class StateMachine { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError('Timed out before dataKey fetched.'); - } return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 77e8cf9376f..54e88210670 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -11,10 +11,18 @@ import { TLSSocket } from 'tls'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { + addContainerMetadata, BSON, + connect, + Connection, + ConnectionOptions, ConnectionPool, + CryptoConnection, CSOTTimeoutContext, + HostAddress, + makeClientMetadata, type MongoClient, + MongoClientAuthProviders, MongoOperationTimeoutError, Timeout, TimeoutContext, @@ -172,34 +180,71 @@ describe('CSOT spec unit tests', function () { }); }); - describe('CryptoConnection', function () { - let autoEncrypter; - beforeEach(async function () { - autoEncrypter = new AutoEncrypter(client, { + describe('CryptoConnection.command()', function () { + let conn; + + beforeEach(async function () { + const commonConnectOptions = { + id: 1, + generation: 1, + monitorCommands: false, + tls: false, + loadBalanced: false, + // Will be overridden by configuration options + hostAddress: HostAddress.fromString('127.0.0.1:1'), + authProviders: new MongoClientAuthProviders() + }; + + const autoEncrypter = new AutoEncrypter( + this.configuration.newClient(), + { keyVaultNamespace: 'admin.datakeys', kmsProviders: { aws: { accessKeyId: 'example', secretAccessKey: 'example' }, local: { key: Buffer.alloc(96) } } - }); - await autoEncrypter.init(); - }); - - afterEach(async function () { + } + ); + let connectOptions: ConnectionOptions = { + ...commonConnectOptions, + autoEncrypter, + connectionType: CryptoConnection, + ...this.configuration.options, + metadata: makeClientMetadata({ driverInfo: {} }), + extendedMetadata: addContainerMetadata(makeClientMetadata({ driverInfo: {} })) + }; + conn = await connect(connectOptions); + }); - }); - describe('#command', function () { - context('when encrypt is provided a timeoutContext', function () { - it('should respect remainingTimeMS', function () { + afterEach(async function () { + conn?.destroy(); + }); - }); + context('when provided a timeoutContext', function () { + it('should respect remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 }); - context('when encrypt is not provided a timeoutContext', function () { - it('should not timeout within 30 seconds', function () { + const err = await conn.command('test.test', { find: 'test', filter: {} }, { timeoutContext }).catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(err.errmsg).to.equal('KMS request timed out'); + }); + }); - }); - }); + context('when 
not provided a timeoutContext', function () { + it.only('should not timeout within 30 seconds', async function () { + const client = this.configuration.newClient(); + client.connect(); + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + const err = await Promise.all([conn.command('test.test', { find: 'test', filter: {} }), sleepingFn()]) + .catch(e => e); + expect(err.message).to.equal('Slept for 30s'); }); + }); }); // TODO(NODE-6390): Add timeoutMS support to Auto Encryption diff --git a/test/unit/client-side-encryption/auto_encrypter.test.ts b/test/unit/client-side-encryption/auto_encrypter.test.ts index 41d41f7e89c..7a66b768e63 100644 --- a/test/unit/client-side-encryption/auto_encrypter.test.ts +++ b/test/unit/client-side-encryption/auto_encrypter.test.ts @@ -39,7 +39,7 @@ const MOCK_MONGOCRYPTD_RESPONSE = readExtendedJsonToBuffer( const MOCK_KEYDOCUMENT_RESPONSE = readExtendedJsonToBuffer(`${__dirname}/data/key-document.json`); const MOCK_KMS_DECRYPT_REPLY = readHttpResponse(`${__dirname}/data/kms-decrypt-reply.txt`); -export class MockClient { +class MockClient { options: any; constructor(options?: any) { From 096f154a3bd7a6f9efdce9cf2a2a77133830f588 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 3 Oct 2024 11:12:43 -0400 Subject: [PATCH 052/136] fix failing tests --- src/client-side-encryption/state_machine.ts | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index ae00080edde..8a48ac8fbad 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -528,8 +528,9 @@ export class StateMachine { .listCollections(filter, { promoteLongs: false, promoteValues: false, - timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined, - timeoutMode: 'cursorLifetime' + ...(timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {}) }) .toArray(); @@ -585,10 +586,12 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter), { - timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined, - timeoutMode: 'cursorLifetime' - }) + .find( + deserialize(filter), + timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {} + ) .toArray(); } } From 5aba790e72bb5fd24e5ceafc343611b9f7a32e47 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 3 Oct 2024 16:42:40 -0400 Subject: [PATCH 053/136] requested changes 3 --- src/client-side-encryption/state_machine.ts | 4 --- ...lient_side_operations_timeout.unit.test.ts | 34 +++++++++++++------ 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 8a48ac8fbad..f47ee191b54 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -435,10 +435,6 @@ export class StateMachine { resolve(); } }); - - if (timeoutContext?.csotEnabled() && timeoutContext?.remainingTimeMS <= 0) { - throw new MongoOperationTimeoutError('Timed out before KMS request.'); - } await (timeoutContext?.csotEnabled() ? 
Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) : willResolveKmsRequest); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 329a3b4cb78..7387099a7f1 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -6,7 +6,9 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; +import { setTimeout } from 'timers'; import { TLSSocket } from 'tls'; +import { promisify } from 'util'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; @@ -19,7 +21,7 @@ import { TimeoutContext, Topology } from '../../mongodb'; -import { sleep } from '../../tools/utils'; +import { createTimerSandbox } from '../../unit/timer_sandbox'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -123,10 +125,7 @@ describe('CSOT spec unit tests', function () { context('when StateMachine.kmsRequest() is passed a `CSOTimeoutContext`', function () { beforeEach(async function () { - sinon.stub(TLSSocket.prototype, 'connect').callsFake(async function (..._args) { - await sleep(200); - return {} as TLSSocket; - }); + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) {}); }); afterEach(async function () { @@ -138,7 +137,6 @@ describe('CSOT spec unit tests', function () { timeoutMS: 500, serverSelectionTimeoutMS: 30000 }); - sleep(300); const err = await stateMachine.kmsRequest(request, timeoutContext).catch(e => e); expect(err).to.be.instanceOf(MongoOperationTimeoutError); expect(err.errmsg).to.equal('KMS request timed out'); @@ -146,13 +144,26 @@ describe('CSOT spec unit tests', function () { }); context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + + let sleep; + beforeEach(async function () { - sinon.stub(TLSSocket.prototype, 'connect').callsFake(async function (..._args) { - return {} as TLSSocket; + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { + clock.tick(30000); }); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); }); afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } sinon.restore(); }); @@ -161,9 +172,10 @@ describe('CSOT spec unit tests', function () { await sleep(30000); throw Error('Slept for 30s'); }; - const err = await Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch( - e => e - ); + + const err$ = Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch(e => e); + clock.tick(30000); + const err = await err$; expect(err.message).to.equal('Slept for 30s'); }); }); From e06a553f786fbacd4ae5b00bd6b5dd3cb4714637 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 3 Oct 2024 16:55:43 -0400 Subject: [PATCH 054/136] temp --- src/client-side-encryption/state_machine.ts | 13 ++---- ...lient_side_operations_timeout.unit.test.ts | 45 ++++++++++++++----- 2 files changed, 38 insertions(+), 20 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 
8073a6e1676..994ecf91a96 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -525,9 +525,7 @@ export class StateMachine { .listCollections(filter, { promoteLongs: false, promoteValues: false, - ...(timeoutContext?.csotEnabled() - ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } - : {}) + timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined }) .toArray(); @@ -583,12 +581,9 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find( - deserialize(filter), - timeoutContext?.csotEnabled() - ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } - : {} - ) + .find(deserialize(filter), { + timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined + }) .toArray(); } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 11c30c443a1..f3718f100c1 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -193,7 +193,7 @@ describe('CSOT spec unit tests', function () { }); }); - describe('CryptoConnection.command()', function () { + describe.only('CryptoConnection.command()', function () { let conn; beforeEach(async function () { @@ -245,17 +245,40 @@ describe('CSOT spec unit tests', function () { }); }); - context('when not provided a timeoutContext', function () { - it.only('should not timeout within 30 seconds', async function () { - const client = this.configuration.newClient(); - client.connect(); + context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + + let sleep; + + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { + clock.tick(30000); + }); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + }); + + afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } + sinon.restore(); + }); + + it('the kms request does not timeout within 30 seconds', async function () { const sleepingFn = async () => { - await sleep(30000); - throw Error('Slept for 30s'); - }; - const err = await Promise.all([conn.command('test.test', { find: 'test', filter: {} }), sleepingFn()]) - .catch(e => e); - expect(err.message).to.equal('Slept for 30s'); + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([conn.command('test.test', { find: 'test', filter: {} }), sleepingFn()]).catch(e => e); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); }); }); }); From 903e0d06957b9b8c51683020a05ea52c39ff51c9 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 3 Oct 2024 17:12:29 -0400 Subject: [PATCH 055/136] limit flaky tests --- test/unit/client-side-encryption/state_machine.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index fe9659675a4..95bb6056355 100644 --- 
a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -494,7 +494,7 @@ describe('StateMachine', function () { .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) .catch(e => squashError(e)); expect(findSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; - expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(200); + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); }); }); @@ -533,7 +533,7 @@ describe('StateMachine', function () { .markCommand(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) .catch(e => squashError(e)); expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; - expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(200); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); }); }); @@ -574,7 +574,7 @@ describe('StateMachine', function () { .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) .catch(e => squashError(e)); expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; - expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(200); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); }); } ); From 9a4dd7b4a4776200c8512911bc9a443f55ee2b5f Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Fri, 4 Oct 2024 03:18:09 -0400 Subject: [PATCH 056/136] ready for review --- src/client-side-encryption/auto_encrypter.ts | 3 +- ...lient_side_operations_timeout.unit.test.ts | 137 ++++++------------ .../auto_encrypter.test.ts | 6 +- 3 files changed, 52 insertions(+), 94 deletions(-) diff --git a/src/client-side-encryption/auto_encrypter.ts b/src/client-side-encryption/auto_encrypter.ts index c655e82d6b6..d2ebf61ca79 100644 --- a/src/client-side-encryption/auto_encrypter.ts +++ b/src/client-side-encryption/auto_encrypter.ts @@ -399,7 +399,8 @@ export class AutoEncrypter { await stateMachine.execute( this, context, - options.timeoutContext?.csotEnabled() ? options.timeoutContext : undefined), + options.timeoutContext?.csotEnabled() ? 
options.timeoutContext : undefined + ), { promoteValues: false, promoteLongs: false diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index f3718f100c1..1ed9a1ca3bb 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -13,26 +13,16 @@ import { promisify } from 'util'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { - addContainerMetadata, - BSON, - connect, - Connection, - ConnectionOptions, ConnectionPool, CryptoConnection, CSOTTimeoutContext, - HostAddress, - makeClientMetadata, type MongoClient, - MongoClientAuthProviders, MongoOperationTimeoutError, + squashError, Timeout, TimeoutContext, Topology } from '../../mongodb'; -/* eslint-disable @typescript-eslint/no-restricted-imports */ -import { AutoEncrypter } from '../../../src/client-side-encryption/auto_encrypter'; -import { sleep } from '../../tools/utils'; import { createTimerSandbox } from '../../unit/timer_sandbox'; // TODO(NODE-5824): Implement CSOT prose tests @@ -193,100 +183,67 @@ describe('CSOT spec unit tests', function () { }); }); - describe.only('CryptoConnection.command()', function () { - let conn; + describe('Auto Encryption', function () { + let client; + let spy; beforeEach(async function () { - const commonConnectOptions = { - id: 1, - generation: 1, - monitorCommands: false, - tls: false, - loadBalanced: false, - // Will be overridden by configuration options - hostAddress: HostAddress.fromString('127.0.0.1:1'), - authProviders: new MongoClientAuthProviders() - }; - - const autoEncrypter = new AutoEncrypter( - this.configuration.newClient(), - { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - } - ); - let connectOptions: ConnectionOptions = { - ...commonConnectOptions, - autoEncrypter, - connectionType: CryptoConnection, - ...this.configuration.options, - metadata: makeClientMetadata({ driverInfo: {} }), - extendedMetadata: addContainerMetadata(makeClientMetadata({ driverInfo: {} })) - }; - conn = await connect(connectOptions); + spy = sinon.spy(CryptoConnection.prototype, 'command'); }); afterEach(async function () { - conn?.destroy(); + await client?.close(); + sinon.restore(); }); - context('when provided a timeoutContext', function () { - it('should respect remainingTimeMS', async function () { - const timeoutContext = new CSOTTimeoutContext({ - timeoutMS: 500, - serverSelectionTimeoutMS: 30000 - }); - const err = await conn.command('test.test', { find: 'test', filter: {} }, { timeoutContext }).catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - expect(err.errmsg).to.equal('KMS request timed out'); + context('when client is provided timeoutMS', function () { + it('should pass timeoutMS into commands sent to mongocryptd', async function () { + client = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + }, + timeoutMS: 10000 + } + ); + await client + .db() + .command({ ping: 1 }) + .catch(e => 
squashError(e)); + expect(spy.getCalls()[2].args[2].timeoutMS).to.exist; }); }); - context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { - let clock: sinon.SinonFakeTimers; - let timerSandbox: sinon.SinonSandbox; - - let sleep; - - beforeEach(async function () { - sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { - clock.tick(30000); - }); - timerSandbox = createTimerSandbox(); - clock = sinon.useFakeTimers(); - sleep = promisify(setTimeout); - }); - - afterEach(async function () { - if (clock) { - timerSandbox.restore(); - clock.restore(); - clock = undefined; - } - sinon.restore(); - }); - - it('the kms request does not timeout within 30 seconds', async function () { - const sleepingFn = async () => { - await sleep(30000); - throw Error('Slept for 30s'); - }; - - const err$ = Promise.all([conn.command('test.test', { find: 'test', filter: {} }), sleepingFn()]).catch(e => e); - clock.tick(30000); - const err = await err$; - expect(err.message).to.equal('Slept for 30s'); + context('when client is not provided timeoutMS`', function () { + it('should pass timeoutMS into commands sent to mongocryptd', async function () { + client = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + } + } + ); + await client + .db() + .command({ ping: 1 }) + .catch(e => squashError(e)); + expect(spy.getCalls()[2].args[2].timeoutMS).to.not.exist; }); }); }); // TODO(NODE-6390): Add timeoutMS support to Auto Encryption - it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => { - - }); + it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => {}); }); context.skip('Background Connection Pooling', function () { diff --git a/test/unit/client-side-encryption/auto_encrypter.test.ts b/test/unit/client-side-encryption/auto_encrypter.test.ts index 7a66b768e63..3f0e3386031 100644 --- a/test/unit/client-side-encryption/auto_encrypter.test.ts +++ b/test/unit/client-side-encryption/auto_encrypter.test.ts @@ -12,8 +12,8 @@ import { StateMachine } from '../../../src/client-side-encryption/state_machine' // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { MongoClient } from '../../../src/mongo_client'; import { BSON, CSOTTimeoutContext, type DataKey } from '../../mongodb'; -import * as requirements from './requirements.helper'; import { sleep } from '../../tools/utils'; +import * as requirements from './requirements.helper'; const bson = BSON; const { EJSON } = BSON; @@ -376,7 +376,7 @@ describe('AutoEncrypter', function () { expect(AutoEncrypter.libmongocryptVersion).to.be.a('string'); }); - describe.only('CSOT', function () { + describe('CSOT', function () { let autoEncrypter: AutoEncrypter; let stateMachineSpy; let client; @@ -399,7 +399,7 @@ describe('AutoEncrypter', function () { }); describe('#encrypt', function () { - context('when encrypt is provided a timeoutContext', async function () { + context('when encrypt is provided a timeoutContext', function () { it('should call stateMachine.execute with a timeoutMS', async function () { const timeoutContext = new CSOTTimeoutContext({ timeoutMS: 500, From 1a06868704bde7c311049374235a1bfa51e5c0c0 Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 4 Oct 2024 10:52:21 -0400 Subject: 
[PATCH 057/136] feat(NODE-6275): Add CSOT support to GridFS (#4246) Co-authored-by: Neal Beeken Co-authored-by: Bailey Pearson --- package-lock.json | 9 +- package.json | 2 +- src/collection.ts | 10 +- src/gridfs/download.ts | 44 +++- src/gridfs/index.ts | 74 +++++-- src/gridfs/upload.ts | 191 ++++++++++++++---- src/operations/find.ts | 1 - src/timeout.ts | 12 ++ ...ient_side_operations_timeout.prose.test.ts | 171 +++++++++++++++- ...lient_side_operations_timeout.spec.test.ts | 5 - .../node_csot.test.ts | 167 ++++++++++++++- test/tools/unified-spec-runner/operations.ts | 37 +++- 12 files changed, 634 insertions(+), 89 deletions(-) diff --git a/package-lock.json b/package-lock.json index 1d9cebf509b..2b3a9b897aa 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.1", + "mongodb-legacy": "^6.1.2", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6440,10 +6440,11 @@ } }, "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.2.tgz", + "integrity": "sha512-oj+LLtvhhi8XuAQ8dll2BVjrnKxOo/7ylyQu0LsKmzyGcbrvzcyvFUOLC6rPhuJPOvnezh3MZ3/Sk9Tl1jpUpg==", "dev": true, + "license": "Apache-2.0", "dependencies": { "mongodb": "^6.0.0" }, diff --git a/package.json b/package.json index 2de0e1811f0..0c4c668726a 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.1", + "mongodb-legacy": "^6.1.2", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/collection.ts b/src/collection.ts index a73a5276f5f..62fa5bd4cba 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -501,12 +501,18 @@ export class Collection { */ async findOne(): Promise | null>; async findOne(filter: Filter): Promise | null>; - async findOne(filter: Filter, options: FindOptions): Promise | null>; + async findOne( + filter: Filter, + options: Omit + ): Promise | null>; // allow an override of the schema. async findOne(): Promise; async findOne(filter: Filter): Promise; - async findOne(filter: Filter, options?: FindOptions): Promise; + async findOne( + filter: Filter, + options?: Omit + ): Promise; async findOne( filter: Filter = {}, diff --git a/src/gridfs/download.ts b/src/gridfs/download.ts index 06dda0a92ba..19651b885ea 100644 --- a/src/gridfs/download.ts +++ b/src/gridfs/download.ts @@ -2,6 +2,7 @@ import { Readable } from 'stream'; import type { Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { FindCursor } from '../cursor/find_cursor'; import { MongoGridFSChunkError, @@ -12,6 +13,7 @@ import { import type { FindOptions } from '../operations/find'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; import type { Callback } from '../utils'; import type { GridFSChunk } from './upload'; @@ -28,7 +30,7 @@ export interface GridFSBucketReadStreamOptions { * to be returned by the stream. 
`end` is non-inclusive */ end?: number; - /** @internal TODO(NODE-5688): make this public */ + /** @public */ timeoutMS?: number; } @@ -98,8 +100,10 @@ export interface GridFSBucketReadStreamPrivate { skip?: number; start: number; end: number; + timeoutMS?: number; }; readPreference?: ReadPreference; + timeoutContext?: CSOTTimeoutContext; } /** @@ -148,7 +152,11 @@ export class GridFSBucketReadStream extends Readable { end: 0, ...options }, - readPreference + readPreference, + timeoutContext: + options?.timeoutMS != null + ? new CSOTTimeoutContext({ timeoutMS: options.timeoutMS, serverSelectionTimeoutMS: 0 }) + : undefined }; } @@ -196,7 +204,8 @@ export class GridFSBucketReadStream extends Readable { async abort(): Promise { this.push(null); this.destroy(); - await this.s.cursor?.close(); + const remainingTimeMS = this.s.timeoutContext?.getRemainingTimeMSOrThrow(); + await this.s.cursor?.close({ timeoutMS: remainingTimeMS }); } } @@ -352,7 +361,22 @@ function init(stream: GridFSBucketReadStream): void { filter['n'] = { $gte: skip }; } } - stream.s.cursor = stream.s.chunks.find(filter).sort({ n: 1 }); + + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + return stream.destroy(error); + } + + stream.s.cursor = stream.s.chunks + .find(filter, { + timeoutMode: stream.s.options.timeoutMS != null ? CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .sort({ n: 1 }); if (stream.s.readPreference) { stream.s.cursor.withReadPreference(stream.s.readPreference); @@ -371,6 +395,18 @@ function init(stream: GridFSBucketReadStream): void { return; }; + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + if (!stream.destroyed) stream.destroy(error); + return; + } + + findOneOptions.timeoutMS = remainingTimeMS; + stream.s.files.findOne(stream.s.filter, findOneOptions).then(handleReadResult, error => { if (stream.destroyed) return; stream.destroy(error); diff --git a/src/gridfs/index.ts b/src/gridfs/index.ts index 51c32b7a01c..de114e5e597 100644 --- a/src/gridfs/index.ts +++ b/src/gridfs/index.ts @@ -2,10 +2,12 @@ import type { ObjectId } from '../bson'; import type { Collection } from '../collection'; import type { FindCursor } from '../cursor/find_cursor'; import type { Db } from '../db'; -import { MongoRuntimeError } from '../error'; +import { MongoOperationTimeoutError, MongoRuntimeError } from '../error'; import { type Filter, TypedEventEmitter } from '../mongo_types'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; +import { resolveOptions } from '../utils'; import { WriteConcern, type WriteConcernOptions } from '../write_concern'; import type { FindOptions } from './../operations/find'; import { @@ -48,6 +50,7 @@ export interface GridFSBucketPrivate { chunkSizeBytes: number; readPreference?: ReadPreference; writeConcern: WriteConcern | undefined; + timeoutMS?: number; }; _chunksCollection: Collection; _filesCollection: Collection; @@ -81,11 +84,11 @@ export class GridFSBucket extends TypedEventEmitter { constructor(db: Db, options?: GridFSBucketOptions) { super(); this.setMaxListeners(0); - const privateOptions = { + const privateOptions = 
resolveOptions(db, { ...DEFAULT_GRIDFS_BUCKET_OPTIONS, ...options, writeConcern: WriteConcern.fromOptions(options) - }; + }); this.s = { db, options: privateOptions, @@ -109,7 +112,10 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, options); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options + }); } /** @@ -122,7 +128,11 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, { ...options, id }); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options, + id + }); } /** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. */ @@ -135,7 +145,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { _id: id }, - options + { timeoutMS: this.s.options.timeoutMS, ...options } ); } @@ -144,11 +154,27 @@ export class GridFSBucket extends TypedEventEmitter { * * @param id - The id of the file doc */ - async delete(id: ObjectId): Promise { - const { deletedCount } = await this.s._filesCollection.deleteOne({ _id: id }); + async delete(id: ObjectId, options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.options.serverSelectionTimeoutMS + }); + } + const { deletedCount } = await this.s._filesCollection.deleteOne( + { _id: id }, + { timeoutMS: timeoutContext?.remainingTimeMS } + ); + + const remainingTimeMS = timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(`Timed out after ${timeoutMS}ms`); // Delete orphaned chunks before returning FileNotFound - await this.s._chunksCollection.deleteMany({ files_id: id }); + await this.s._chunksCollection.deleteMany({ files_id: id }, { timeoutMS: remainingTimeMS }); if (deletedCount === 0) { // TODO(NODE-3483): Replace with more appropriate error @@ -188,7 +214,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { filename }, - { ...options, sort, skip } + { timeoutMS: this.s.options.timeoutMS, ...options, sort, skip } ); } @@ -198,18 +224,36 @@ export class GridFSBucket extends TypedEventEmitter { * @param id - the id of the file to rename * @param filename - new name for the file */ - async rename(id: ObjectId, filename: string): Promise { + async rename(id: ObjectId, filename: string, options?: { timeoutMS: number }): Promise { const filter = { _id: id }; const update = { $set: { filename } }; - const { matchedCount } = await this.s._filesCollection.updateOne(filter, update); + const { matchedCount } = await this.s._filesCollection.updateOne(filter, update, options); if (matchedCount === 0) { throw new MongoRuntimeError(`File with id ${id} not found`); } } /** Removes this bucket's files collection, followed by its chunks collection. 
*/ - async drop(): Promise { - await this.s._filesCollection.drop(); - await this.s._chunksCollection.drop(); + async drop(options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.options.serverSelectionTimeoutMS + }); + } + + if (timeoutContext) { + await this.s._filesCollection.drop({ timeoutMS: timeoutContext.remainingTimeMS }); + const remainingTimeMS = timeoutContext.getRemainingTimeMSOrThrow( + `Timed out after ${timeoutMS}ms` + ); + await this.s._chunksCollection.drop({ timeoutMS: remainingTimeMS }); + } else { + await this.s._filesCollection.drop(); + await this.s._chunksCollection.drop(); + } } } diff --git a/src/gridfs/upload.ts b/src/gridfs/upload.ts index f54d5131f66..c7544b715d8 100644 --- a/src/gridfs/upload.ts +++ b/src/gridfs/upload.ts @@ -2,7 +2,14 @@ import { Writable } from 'stream'; import { type Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; -import { MongoAPIError, MONGODB_ERROR_CODES, MongoError } from '../error'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; +import { + MongoAPIError, + MONGODB_ERROR_CODES, + MongoError, + MongoOperationTimeoutError +} from '../error'; +import { CSOTTimeoutContext } from '../timeout'; import { type Callback, squashError } from '../utils'; import type { WriteConcernOptions } from '../write_concern'; import { WriteConcern } from './../write_concern'; @@ -35,7 +42,7 @@ export interface GridFSBucketWriteStreamOptions extends WriteConcernOptions { * @deprecated Will be removed in the next major version. Add an aliases field to the metadata document instead. 
*/ aliases?: string[]; - /** @internal TODO(NODE-5688): make this public */ + /** @public */ timeoutMS?: number; } @@ -97,6 +104,8 @@ export class GridFSBucketWriteStream extends Writable { * ``` */ gridFSFile: GridFSFile | null = null; + /** @internal */ + timeoutContext?: CSOTTimeoutContext; /** * @param bucket - Handle for this stream's corresponding bucket @@ -131,14 +140,11 @@ export class GridFSBucketWriteStream extends Writable { aborted: false }; - if (!this.bucket.s.calledOpenUploadStream) { - this.bucket.s.calledOpenUploadStream = true; - - checkIndexes(this).then(() => { - this.bucket.s.checkedIndexes = true; - this.bucket.emit('index'); - }, squashError); - } + if (options.timeoutMS != null) + this.timeoutContext = new CSOTTimeoutContext({ + timeoutMS: options.timeoutMS, + serverSelectionTimeoutMS: this.bucket.s.db.client.options.serverSelectionTimeoutMS + }); } /** @@ -147,10 +153,26 @@ export class GridFSBucketWriteStream extends Writable { * The stream is considered constructed when the indexes are done being created */ override _construct(callback: (error?: Error | null) => void): void { - if (this.bucket.s.checkedIndexes) { + if (!this.bucket.s.calledOpenUploadStream) { + this.bucket.s.calledOpenUploadStream = true; + + checkIndexes(this).then( + () => { + this.bucket.s.checkedIndexes = true; + this.bucket.emit('index'); + callback(); + }, + error => { + if (error instanceof MongoOperationTimeoutError) { + return handleError(this, error, callback); + } + squashError(error); + callback(); + } + ); + } else { return process.nextTick(callback); } - this.bucket.once('index', callback); } /** @@ -194,7 +216,10 @@ export class GridFSBucketWriteStream extends Writable { } this.state.aborted = true; - await this.chunks.deleteMany({ files_id: this.id }); + const remainingTimeMS = this.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${this.timeoutContext?.timeoutMS}ms` + ); + await this.chunks.deleteMany({ files_id: this.id, timeoutMS: remainingTimeMS }); } } @@ -219,9 +244,19 @@ function createChunkDoc(filesId: ObjectId, n: number, data: Buffer): GridFSChunk async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise { const index = { files_id: 1, n: 1 }; + let remainingTimeMS; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + let indexes; try { - indexes = await stream.chunks.listIndexes().toArray(); + indexes = await stream.chunks + .listIndexes({ + timeoutMode: remainingTimeMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -239,10 +274,14 @@ async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise }); if (!hasChunksIndex) { + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); await stream.chunks.createIndex(index, { ...stream.writeConcern, background: true, - unique: true + unique: true, + timeoutMS: remainingTimeMS }); } } @@ -270,13 +309,28 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { return; } - stream.files.insertOne(gridFSFile, { writeConcern: stream.writeConcern }).then( - () => { - stream.gridFSFile = gridFSFile; - callback(); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + + stream.files + .insertOne(gridFSFile, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + stream.gridFSFile = gridFSFile; + callback(); + }, + error => { + return handleError(stream, error, callback); + } + ); return; } @@ -284,7 +338,16 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { } async function checkIndexes(stream: GridFSBucketWriteStream): Promise { - const doc = await stream.files.findOne({}, { projection: { _id: 1 } }); + let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const doc = await stream.files.findOne( + {}, + { + projection: { _id: 1 }, + timeoutMS: remainingTimeMS + } + ); if (doc != null) { // If at least one document exists assume the collection has the required index return; @@ -293,8 +356,15 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { const index = { filename: 1, uploadDate: 1 }; let indexes; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const listIndexesOptions = { + timeoutMode: remainingTimeMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }; try { - indexes = await stream.files.listIndexes().toArray(); + indexes = await stream.files.listIndexes(listIndexesOptions).toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -312,7 +382,11 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { }); if (!hasFileIndex) { - await stream.files.createIndex(index, { background: false }); + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + + await stream.files.createIndex(index, { background: false, timeoutMS: remainingTimeMS }); } await checkChunksIndex(stream); @@ -386,6 +460,18 @@ function doWrite( let doc: GridFSChunk; if (spaceRemaining === 0) { doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore)); + + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; ++outstandingRequests; @@ -393,17 +479,21 @@ function doWrite( return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - --outstandingRequests; - - if (!outstandingRequests) { - checkDone(stream, callback); + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + --outstandingRequests; + + if (!outstandingRequests) { + checkDone(stream, callback); + } + }, + error => { + return handleError(stream, error, callback); } - }, - error => handleError(stream, error, callback) - ); + ); spaceRemaining = stream.chunkSizeBytes; stream.pos = 0; @@ -420,8 +510,6 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return checkDone(stream, callback); } - ++stream.state.outstandingRequests; - // Create a new buffer to make sure the buffer isn't bigger than it needs // to be. 
const remnant = Buffer.alloc(stream.pos); @@ -433,13 +521,28 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - checkDone(stream, callback); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + checkDone(stream, callback); + }, + error => { + return handleError(stream, error, callback); + } + ); } function isAborted(stream: GridFSBucketWriteStream, callback: Callback): boolean { diff --git a/src/operations/find.ts b/src/operations/find.ts index c39695cc0bc..641255553a0 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -66,7 +66,6 @@ export interface FindOptions */ oplogReplay?: boolean; - /** @internal*/ timeoutMode?: CursorTimeoutMode; } diff --git a/src/timeout.ts b/src/timeout.ts index f7fb3d0daa5..f694b5f4f4f 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -305,6 +305,18 @@ export class CSOTTimeoutContext extends TimeoutContext { this._serverSelectionTimeout?.clear(); this._connectionCheckoutTimeout?.clear(); } + + /** + * @internal + * Throws a MongoOperationTimeoutError if the context has expired. + * If the context has not expired, returns the `remainingTimeMS` + **/ + getRemainingTimeMSOrThrow(message?: string): number { + const { remainingTimeMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(message ?? `Expired after ${this.timeoutMS}ms`); + return remainingTimeMS; + } } /** @internal */ diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index e276c9bbafd..1b8c34633b4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -3,15 +3,20 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { Readable } from 'stream'; +import { pipeline } from 'stream/promises'; import { type CommandStartedEvent } from '../../../mongodb'; import { type CommandSucceededEvent, + GridFSBucket, MongoBulkWriteError, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, - now + now, + ObjectId, + promiseWithResolvers } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -398,10 +403,42 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('6. GridFS - Upload', () => { + context('6. 
GridFS - Upload', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + if (client) { + await client.close(); + } + }); /** Tests in this section MUST only be run against server versions 4.4 and higher. */ - context('uploads via openUploadStream can be timed out', () => { + it('uploads via openUploadStream can be timed out', metadata, async function () { /** * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. * 1. Using `internalClient`, set the following fail point: @@ -424,9 +461,30 @@ describe('CSOT spec prose tests', function () { * 1. Call `uploadStream.close()` to flush the stream and insert chunks. * - Expect this to fail with a timeout error. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const bucket = new GridFSBucket(client.db('db')); + const stream = bucket.openUploadStream('filename'); + const data = Buffer.from('13', 'hex'); + + const fileStream = Readable.from(data); + const maybeError = await pipeline(fileStream, stream).then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); }); - context('Aborting an upload stream can be timed out', () => { + it('Aborting an upload stream can be timed out', metadata, async function () { /** * This test only applies to drivers that provide an API to abort a GridFS upload stream. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -450,10 +508,92 @@ describe('CSOT spec prose tests', function () { * 1. Call `uploadStream.abort()`. * - Expect this to fail with a timeout error. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['delete'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + await internalClient.db().admin().command(failpoint); + const bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 300 }); + + const data = Buffer.from('01020304', 'hex'); + + const { promise: writePromise, resolve, reject } = promiseWithResolvers(); + uploadStream.on('error', error => uploadStream.destroy(error)); + uploadStream.write(data, error => { + if (error) reject(error); + else resolve(); + }); + let maybeError = await writePromise.then( + () => null, + e => e + ); + expect(maybeError).to.be.null; + + maybeError = await uploadStream.abort().then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + uploadStream.destroy(); }); }); - context.skip('7. GridFS - Download', () => { + context('7. 
GridFS - Download', () => { + let internalClient: MongoClient; + let client: MongoClient; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + + await files.insertOne({ + _id: new ObjectId('000000000000000000000005'), + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + + if (client) { + await client.close(); + } + }); + /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -495,6 +635,27 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against `db.fs.chunks`. */ + it('download streams can be timed out', metadata, async function () { + const bucket = new GridFSBucket(client.db('db')); + const downloadStream = bucket.openDownloadStream(new ObjectId('000000000000000000000005')); + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); }); context('8. 
Server Selection', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index c2e08cfc80a..49ddabc924b 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -8,11 +8,6 @@ const skippedSpecs = { 'change-streams': 'TODO(NODE-6035)', 'convenient-transactions': 'TODO(NODE-5687)', 'deprecated-options': 'TODO(NODE-5689)', - 'gridfs-advanced': 'TODO(NODE-6275)', - 'gridfs-delete': 'TODO(NODE-6275)', - 'gridfs-download': 'TODO(NODE-6275)', - 'gridfs-find': 'TODO(NODE-6275)', - 'gridfs-upload': 'TODO(NODE-6275)', 'tailable-awaitData': 'TODO(NODE-6035)', 'tailable-non-awaitData': 'TODO(NODE-6035)' }; diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 56127cc8ace..b2011ee2e73 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,7 @@ /* Anything javascript specific relating to timeouts */ +import { once } from 'node:events'; +import { Readable } from 'node:stream'; +import { pipeline } from 'node:stream/promises'; import { setTimeout } from 'node:timers/promises'; import { expect } from 'chai'; @@ -15,11 +18,13 @@ import { Connection, type Db, type FindCursor, + GridFSBucket, LEGACY_HELLO_COMMAND, type MongoClient, MongoInvalidArgumentError, MongoOperationTimeoutError, - MongoServerError + MongoServerError, + ObjectId } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -576,6 +581,166 @@ describe('CSOT driver tests', metadata, () => { }); }); + describe('GridFSBucket', () => { + const blockTimeMS = 200; + let internalClient: MongoClient; + let client: MongoClient; + let bucket: GridFSBucket; + + beforeEach(async function () { + client = this.configuration.newClient(undefined, { timeoutMS: 1000 }); + internalClient = this.configuration.newClient(undefined); + }); + + afterEach(async function () { + await client.close(); + await internalClient.db().admin().command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + }); + + context('upload', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + }); + + describe('openUploadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 175 }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + + it('only emits index event once per bucket', metadata, async function () 
{ + let numEventsSeen = 0; + bucket.on('index', () => numEventsSeen++); + + const uploadStream0 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream0.destroy(error)); + const uploadStream1 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream1.destroy(error)); + + const data = Buffer.from('test', 'utf-8'); + await pipeline(Readable.from(data), uploadStream0); + await pipeline(Readable.from(data), uploadStream1); + + expect(numEventsSeen).to.equal(1); + }); + }); + + describe('openUploadStreamWithId', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStreamWithId(new ObjectId(), 'filename', { + timeoutMS: 175 + }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + + context('download', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS + } + }; + const _id = new ObjectId('000000000000000000000005'); + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + await files.insertOne({ + _id, + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(db); + }); + + describe('openDownloadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStream(_id, { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('openDownloadStreamByName', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStreamByName('length-10', { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + }); + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 31414fa4664..a9f79842c31 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -11,6 +11,7 @@ import { CommandStartedEvent, Db, type Document, + GridFSBucket, type MongoClient, MongoError, ReadConcern, @@ -311,7 +312,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { operations.set('drop', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.drop(); + return bucket.drop(operation.arguments); }); 
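The 'drop' handler above (like the 'delete' and 'rename' handlers updated further down) now forwards operation.arguments, so the unified spec tests can exercise the per-operation timeoutMS options that this patch adds to GridFSBucket. A minimal usage sketch (not an excerpt from the patch), assuming a connected MongoClient named `client` and an existing GridFS file id `fileId`:

import { GridFSBucket, MongoClient, MongoOperationTimeoutError, ObjectId } from 'mongodb';

async function cleanUpBucket(client: MongoClient, fileId: ObjectId): Promise<void> {
  const bucket = new GridFSBucket(client.db('db'));
  try {
    // Each helper accepts its own timeoutMS budget; internally the bucket threads
    // the remaining time through the files and chunks collections it touches.
    await bucket.rename(fileId, 'renamed-file', { timeoutMS: 500 });
    await bucket.delete(fileId, { timeoutMS: 500 });
    await bucket.drop({ timeoutMS: 500 });
  } catch (error) {
    // MongoOperationTimeoutError indicates the per-call budget elapsed before
    // the bucket finished its work.
    if (error instanceof MongoOperationTimeoutError) {
      console.error('GridFS cleanup timed out:', error.message);
    }
    throw error;
  }
}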
operations.set('dropIndexes', async ({ entities, operation }) => { @@ -529,7 +530,8 @@ operations.set('targetedFailPoint', async ({ entities, operation }) => { operations.set('delete', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.delete(operation.arguments!.id); + const { id, ...opts } = operation.arguments; + return bucket.delete(id, opts); }); operations.set('download', async ({ entities, operation }) => { @@ -537,7 +539,8 @@ operations.set('download', async ({ entities, operation }) => { const { id, ...options } = operation.arguments ?? {}; const stream = bucket.openDownloadStream(id, options); - return Buffer.concat(await stream.toArray()); + const data = Buffer.concat(await stream.toArray()); + return data; }); operations.set('downloadByName', async ({ entities, operation }) => { @@ -552,7 +555,6 @@ operations.set('downloadByName', async ({ entities, operation }) => { operations.set('upload', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); const { filename, source, ...options } = operation.arguments ?? {}; - const stream = bucket.openUploadStream(filename, options); const fileStream = Readable.from(Buffer.from(source.$$hexBytes, 'hex')); @@ -832,9 +834,30 @@ operations.set('updateOne', async ({ entities, operation }) => { }); operations.set('rename', async ({ entities, operation }) => { - const collection = entities.getEntity('collection', operation.object); - const { to, ...options } = operation.arguments!; - return collection.rename(to, options); + let entity: GridFSBucket | Collection | undefined; + try { + entity = entities.getEntity('collection', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof Collection) { + const { to, ...options } = operation.arguments!; + return entity.rename(to, options); + } + + try { + entity = entities.getEntity('bucket', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof GridFSBucket) { + const { id, newFilename, ...opts } = operation.arguments!; + return entity.rename(id, newFilename, opts as any); + } + + expect.fail(`No collection or bucket with name '${operation.object}' found`); }); operations.set('createDataKey', async ({ entities, operation }) => { From a4fb3d0bc2df06d3aa9661353175643591021bae Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Fri, 4 Oct 2024 13:31:17 -0400 Subject: [PATCH 058/136] fix revert --- src/client-side-encryption/state_machine.ts | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index 994ecf91a96..f47ee191b54 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -435,7 +435,6 @@ export class StateMachine { resolve(); } }); - await (timeoutContext?.csotEnabled() ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) : willResolveKmsRequest); @@ -525,7 +524,9 @@ export class StateMachine { .listCollections(filter, { promoteLongs: false, promoteValues: false, - timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined + ...(timeoutContext?.csotEnabled() + ? 
{ timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {}) }) .toArray(); @@ -581,9 +582,12 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter), { - timeoutMS: timeoutContext?.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined - }) + .find( + deserialize(filter), + timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {} + ) .toArray(); } } From 392599ce6761921ce626d72e4b6875d5a0111875 Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Fri, 4 Oct 2024 15:16:50 -0600 Subject: [PATCH 059/136] refactor(NODE-6411): AbstractCursor accepts an external timeout context (#4264) --- src/cmap/connection.ts | 6 +- src/cursor/abstract_cursor.ts | 134 ++++++++++++++---- src/index.ts | 1 + src/operations/find.ts | 5 +- src/timeout.ts | 17 +++ .../node_csot.test.ts | 18 ++- .../crud/find_cursor_methods.test.js | 52 +++++-- .../node-specific/abstract_cursor.test.ts | 117 ++++++++++++++- ...er_selection.prose.operation_count.test.ts | 23 +-- test/tools/utils.ts | 32 ++++- 10 files changed, 330 insertions(+), 75 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 507615e9f03..a43d6106c7b 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -422,9 +422,9 @@ export class Connection extends TypedEventEmitter { ...options }; - if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { - const { maxTimeMS } = options.timeoutContext; - if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + if (!options.omitMaxTimeMS) { + const maxTimeMS = options.timeoutContext?.maxTimeMS; + if (maxTimeMS && maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } const message = this.supportsOpMsg diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index d0f386923ad..f7e488d24b2 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,7 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from '../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; -import { TimeoutContext } from '../timeout'; +import { type CSOTTimeoutContext, type Timeout, TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -119,6 +119,14 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { timeoutMS?: number; /** @internal TODO(NODE-5688): make this public */ timeoutMode?: CursorTimeoutMode; + + /** + * @internal + * + * A timeout context to govern the total time the cursor can live. If provided, the cursor + * cannot be used in ITERATION mode. 
+ */ + timeoutContext?: CursorTimeoutContext; } /** @internal */ @@ -171,7 +179,7 @@ export abstract class AbstractCursor< /** @internal */ protected readonly cursorOptions: InternalAbstractCursorOptions; /** @internal */ - protected timeoutContext?: TimeoutContext; + protected timeoutContext?: CursorTimeoutContext; /** @event */ static readonly CLOSE = 'close' as const; @@ -205,20 +213,12 @@ export abstract class AbstractCursor< }; this.cursorOptions.timeoutMS = options.timeoutMS; if (this.cursorOptions.timeoutMS != null) { - if (options.timeoutMode == null) { - if (options.tailable) { - this.cursorOptions.timeoutMode = CursorTimeoutMode.ITERATION; - } else { - this.cursorOptions.timeoutMode = CursorTimeoutMode.LIFETIME; - } - } else { - if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { - throw new MongoInvalidArgumentError( - "Cannot set tailable cursor's timeoutMode to LIFETIME" - ); - } - this.cursorOptions.timeoutMode = options.timeoutMode; + if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { + throw new MongoInvalidArgumentError("Cannot set tailable cursor's timeoutMode to LIFETIME"); } + this.cursorOptions.timeoutMode = + options.timeoutMode ?? + (options.tailable ? CursorTimeoutMode.ITERATION : CursorTimeoutMode.LIFETIME); } else { if (options.timeoutMode != null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); @@ -264,6 +264,17 @@ export abstract class AbstractCursor< utf8: options?.enableUtf8Validation === false ? false : true } }; + + if ( + options.timeoutContext != null && + options.timeoutMS != null && + this.cursorOptions.timeoutMode !== CursorTimeoutMode.LIFETIME + ) { + throw new MongoAPIError( + `cannot create a cursor with an externally provided timeout context that doesn't use timeoutMode=CURSOR_LIFETIME.` + ); + } + this.timeoutContext = options.timeoutContext; } /** @@ -721,6 +732,9 @@ export abstract class AbstractCursor< * if the resultant data has already been retrieved by this cursor. 
*/ rewind(): void { + if (this.timeoutContext && this.timeoutContext.owner !== this) { + throw new MongoAPIError(`Cannot rewind cursor that does not own its timeout context.`); + } if (!this.initialized) { return; } @@ -790,10 +804,13 @@ export abstract class AbstractCursor< */ private async cursorInit(): Promise { if (this.cursorOptions.timeoutMS != null) { - this.timeoutContext = TimeoutContext.create({ - serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, - timeoutMS: this.cursorOptions.timeoutMS - }); + this.timeoutContext ??= new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }), + this + ); } try { const state = await this._initialize(this.cursorSession); @@ -872,6 +889,20 @@ export abstract class AbstractCursor< private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; + const timeoutContextForKillCursors = (): CursorTimeoutContext | undefined => { + if (timeoutMS != null) { + this.timeoutContext?.clear(); + return new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }), + this + ); + } else { + return this.timeoutContext?.refreshed(); + } + }; try { if ( !this.isKilled && @@ -884,23 +915,13 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; - let timeoutContext: TimeoutContext | undefined; - if (timeoutMS != null) { - this.timeoutContext?.clear(); - timeoutContext = TimeoutContext.create({ - serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, - timeoutMS - }); - } else { - this.timeoutContext?.refresh(); - timeoutContext = this.timeoutContext; - } + await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session }), - timeoutContext + timeoutContextForKillCursors() ); } } catch (error) { @@ -1042,3 +1063,54 @@ class ReadableCursorStream extends Readable { } configureResourceManagement(AbstractCursor.prototype); + +/** + * @internal + * The cursor timeout context is a wrapper around a timeout context + * that keeps track of the "owner" of the cursor. For timeout contexts + * instantiated inside a cursor, the owner will be the cursor. + * + * All timeout behavior is exactly the same as the wrapped timeout context's. 
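 *
 * As an illustrative sketch only (not part of this patch), an operation that
 * wants to share one timeout budget with a cursor it creates could construct
 * the context itself; `collection` is assumed to be an existing Collection:
 *
 *   const owner = Symbol('bulk-read');
 *   const timeoutContext = new CursorTimeoutContext(
 *     TimeoutContext.create({ timeoutMS: 1000, serverSelectionTimeoutMS: 2000 }),
 *     owner
 *   );
 *   // timeoutMode defaults to CursorTimeoutMode.LIFETIME here, which is required
 *   // whenever an external timeout context is supplied.
 *   const cursor = collection.find({}, { timeoutContext, timeoutMS: 1000 });
 *   // cursor.rewind() would throw a MongoAPIError, because this cursor does not
 *   // own the provided timeout context.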
+ */ +export class CursorTimeoutContext extends TimeoutContext { + constructor( + public timeoutContext: TimeoutContext, + public owner: symbol | AbstractCursor + ) { + super(); + } + override get serverSelectionTimeout(): Timeout | null { + return this.timeoutContext.serverSelectionTimeout; + } + override get connectionCheckoutTimeout(): Timeout | null { + return this.timeoutContext.connectionCheckoutTimeout; + } + override get clearServerSelectionTimeout(): boolean { + return this.timeoutContext.clearServerSelectionTimeout; + } + override get clearConnectionCheckoutTimeout(): boolean { + return this.timeoutContext.clearConnectionCheckoutTimeout; + } + override get timeoutForSocketWrite(): Timeout | null { + return this.timeoutContext.timeoutForSocketWrite; + } + override get timeoutForSocketRead(): Timeout | null { + return this.timeoutContext.timeoutForSocketRead; + } + override csotEnabled(): this is CSOTTimeoutContext { + return this.timeoutContext.csotEnabled(); + } + override refresh(): void { + return this.timeoutContext.refresh(); + } + override clear(): void { + return this.timeoutContext.clear(); + } + override get maxTimeMS(): number | null { + return this.timeoutContext.maxTimeMS; + } + + override refreshed(): CursorTimeoutContext { + return new CursorTimeoutContext(this.timeoutContext.refreshed(), this.owner); + } +} diff --git a/src/index.ts b/src/index.ts index 7f948f30ed4..82bbeb2aec7 100644 --- a/src/index.ts +++ b/src/index.ts @@ -358,6 +358,7 @@ export type { CursorStreamOptions } from './cursor/abstract_cursor'; export type { + CursorTimeoutContext, InitialCursorResponse, InternalAbstractCursorOptions } from './cursor/abstract_cursor'; diff --git a/src/operations/find.ts b/src/operations/find.ts index 641255553a0..348467acf75 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,6 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; -import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; @@ -17,7 +17,8 @@ import { Aspect, defineAspects, type Hint } from './operation'; */ // eslint-disable-next-line @typescript-eslint/no-unused-vars export interface FindOptions - extends Omit { + extends Omit, + AbstractCursorOptions { /** Sets the limit of documents returned in the query. */ limit?: number; /** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */ diff --git a/src/timeout.ts b/src/timeout.ts index f694b5f4f4f..9041ce4b88d 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -178,6 +178,8 @@ export abstract class TimeoutContext { else throw new MongoRuntimeError('Unrecognized options'); } + abstract get maxTimeMS(): number | null; + abstract get serverSelectionTimeout(): Timeout | null; abstract get connectionCheckoutTimeout(): Timeout | null; @@ -195,6 +197,9 @@ export abstract class TimeoutContext { abstract refresh(): void; abstract clear(): void; + + /** Returns a new instance of the TimeoutContext, with all timeouts refreshed and restarted. */ + abstract refreshed(): TimeoutContext; } /** @internal */ @@ -317,6 +322,10 @@ export class CSOTTimeoutContext extends TimeoutContext { throw new MongoOperationTimeoutError(message ?? 
`Expired after ${this.timeoutMS}ms`); return remainingTimeMS; } + + override refreshed(): CSOTTimeoutContext { + return new CSOTTimeoutContext(this); + } } /** @internal */ @@ -363,4 +372,12 @@ export class LegacyTimeoutContext extends TimeoutContext { clear(): void { return; } + + get maxTimeMS() { + return null; + } + + override refreshed(): LegacyTimeoutContext { + return new LegacyTimeoutContext(this.options); + } } diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b2011ee2e73..f4cfc7d882c 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -26,7 +26,7 @@ import { MongoServerError, ObjectId } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { type FailPoint, waitUntilPoolsFilled } from '../../tools/utils'; const metadata = { requires: { mongodb: '>=4.4' } }; @@ -362,7 +362,7 @@ describe('CSOT driver tests', metadata, () => { }; beforeEach(async function () { - internalClient = this.configuration.newClient(); + internalClient = this.configuration.newClient({}); await internalClient .db('db') .dropCollection('coll') @@ -378,7 +378,11 @@ describe('CSOT driver tests', metadata, () => { await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { monitorCommands: true }); + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize: 10 }); + + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + commandStarted = []; commandSucceeded = []; client.on('commandStarted', ev => commandStarted.push(ev)); @@ -492,7 +496,13 @@ describe('CSOT driver tests', metadata, () => { await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { monitorCommands: true }); + client = this.configuration.newClient(undefined, { + monitorCommands: true, + minPoolSize: 10 + }); + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + commandStarted = []; commandSucceeded = []; client.on('commandStarted', ev => commandStarted.push(ev)); diff --git a/test/integration/crud/find_cursor_methods.test.js b/test/integration/crud/find_cursor_methods.test.js index 42eeda3e816..21a6649bf0b 100644 --- a/test/integration/crud/find_cursor_methods.test.js +++ b/test/integration/crud/find_cursor_methods.test.js @@ -1,7 +1,13 @@ 'use strict'; const { expect } = require('chai'); const { filterForCommands } = require('../shared'); -const { promiseWithResolvers, MongoCursorExhaustedError } = require('../../mongodb'); +const { + promiseWithResolvers, + MongoCursorExhaustedError, + CursorTimeoutContext, + TimeoutContext, + MongoAPIError +} = require('../../mongodb'); describe('Find Cursor', function () { let client; @@ -246,23 +252,45 @@ describe('Find Cursor', function () { }); context('#rewind', function () { - it('should rewind a cursor', function (done) { + it('should rewind a cursor', async function () { const coll = client.db().collection('abstract_cursor'); const cursor = coll.find({}); - this.defer(() => cursor.close()); - cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + try { + let docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); cursor.rewind(); - 
cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); + } finally { + await cursor.close(); + } + }); - done(); - }); - }); + it('throws if the cursor does not own its timeoutContext', async function () { + const coll = client.db().collection('abstract_cursor'); + const cursor = coll.find( + {}, + { + timeoutContext: new CursorTimeoutContext( + TimeoutContext.create({ + timeoutMS: 1000, + serverSelectionTimeoutMS: 1000 + }), + Symbol() + ) + } + ); + + try { + cursor.rewind(); + expect.fail(`rewind should have thrown.`); + } catch (error) { + expect(error).to.be.instanceOf(MongoAPIError); + } finally { + await cursor.close(); + } }); it('should end an implicit session on rewind', { diff --git a/test/integration/node-specific/abstract_cursor.test.ts b/test/integration/node-specific/abstract_cursor.test.ts index a5e7fba13dd..136e72a3499 100644 --- a/test/integration/node-specific/abstract_cursor.test.ts +++ b/test/integration/node-specific/abstract_cursor.test.ts @@ -7,12 +7,17 @@ import { inspect } from 'util'; import { AbstractCursor, type Collection, + CursorTimeoutContext, + CursorTimeoutMode, type FindCursor, MongoAPIError, type MongoClient, MongoCursorExhaustedError, - MongoServerError + MongoOperationTimeoutError, + MongoServerError, + TimeoutContext } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; describe('class AbstractCursor', function () { describe('regression tests NODE-5372', function () { @@ -395,4 +400,114 @@ describe('class AbstractCursor', function () { expect(nextSpy.callCount).to.be.lessThan(numDocuments); }); }); + + describe('externally provided timeout contexts', function () { + let client: MongoClient; + let collection: Collection; + let context: CursorTimeoutContext; + + beforeEach(async function () { + client = this.configuration.newClient(); + + collection = client.db('abstract_cursor_integration').collection('test'); + + context = new CursorTimeoutContext( + TimeoutContext.create({ timeoutMS: 1000, serverSelectionTimeoutMS: 2000 }), + Symbol() + ); + + await collection.insertMany([{ a: 1 }, { b: 2 }, { c: 3 }]); + }); + + afterEach(async function () { + await collection.deleteMany({}); + await client.close(); + }); + + describe('when timeoutMode != LIFETIME', function () { + it('an error is thrown', function () { + expect(() => + collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.ITERATION } + ) + ).to.throw( + `cannot create a cursor with an externally provided timeout context that doesn't use timeoutMode=CURSOR_LIFETIME` + ); + }); + }); + + describe('when timeoutMode is omitted', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find({}, { timeoutContext: context, timeoutMS: 1000 }); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when timeoutMode is LIFETIME', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + // @ts-expect-error Private access. 
+ expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor is initialized', function () { + it('the provided timeoutContext is not overwritten', async function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + await cursor.toArray(); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor refreshes the timeout for killCursors', function () { + it( + 'the provided timeoutContext is not modified', + { + requires: { + mongodb: '>=4.4' + } + }, + async function () { + await client.db('admin').command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 5000 + } + } as FailPoint); + + const cursor = collection.find( + {}, + { + timeoutContext: context, + timeoutMS: 1000, + timeoutMode: CursorTimeoutMode.LIFETIME, + batchSize: 1 + } + ); + + const error = await cursor.toArray().catch(e => e); + + expect(error).to.be.instanceof(MongoOperationTimeoutError); + // @ts-expect-error We know we have a CSOT timeout context but TS does not. + expect(context.timeoutContext.remainingTimeMS).to.be.lessThan(0); + } + ); + }); + }); }); diff --git a/test/integration/server-selection/server_selection.prose.operation_count.test.ts b/test/integration/server-selection/server_selection.prose.operation_count.test.ts index fec6d24e61c..b4a7d9bf47b 100644 --- a/test/integration/server-selection/server_selection.prose.operation_count.test.ts +++ b/test/integration/server-selection/server_selection.prose.operation_count.test.ts @@ -1,5 +1,4 @@ import { expect } from 'chai'; -import { on } from 'events'; import { type Collection, @@ -7,7 +6,7 @@ import { HostAddress, type MongoClient } from '../../mongodb'; -import { sleep } from '../../tools/utils'; +import { waitUntilPoolsFilled } from '../../tools/utils'; const failPoint = { configureFailPoint: 'failCommand', @@ -28,17 +27,6 @@ async function runTaskGroup(collection: Collection, count: 10 | 100 | 1000) { } } -async function ensurePoolIsFull(client: MongoClient): Promise { - let connectionCount = 0; - - for await (const _event of on(client, 'connectionCreated')) { - connectionCount++; - if (connectionCount === POOL_SIZE * 2) { - break; - } - } -} - // Step 1: Configure a sharded cluster with two mongoses. Use a 4.2.9 or newer server version. 
const TEST_METADATA: MongoDBMetadataUI = { requires: { mongodb: '>=4.2.9', topology: 'sharded' } }; @@ -75,15 +63,8 @@ describe('operationCount-based Selection Within Latency Window - Prose Test', fu client.on('commandStarted', updateCount); - const poolIsFullPromise = ensurePoolIsFull(client); - - await client.connect(); - // Step 4: Using CMAP events, ensure the client's connection pools for both mongoses have been saturated - const poolIsFull = Promise.race([poolIsFullPromise, sleep(30 * 1000)]); - if (!poolIsFull) { - throw new Error('Timed out waiting for connection pool to fill to minPoolSize'); - } + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), POOL_SIZE * 2); seeds = client.topology.s.seedlist.map(address => address.toString()); diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 3cb50d2cd51..8614bd7d64c 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -1,5 +1,5 @@ import * as child_process from 'node:child_process'; -import { once } from 'node:events'; +import { on, once } from 'node:events'; import * as fs from 'node:fs/promises'; import * as path from 'node:path'; @@ -568,3 +568,33 @@ export async function itInNodeProcess( } }); } + +/** + * Connects the client and waits until `client` has emitted `count` connectionCreated events. + * + * **This will hang if the client does not have a maxPoolSizeSet!** + * + * This is useful when you want to ensure that the client has pools that are full of connections. + * + * This does not guarantee that all pools that the client has are completely full unless + * count = number of servers to which the client is connected * maxPoolSize. But it can + * serve as a way to ensure that some connections have been established and are in the pools. + */ +export async function waitUntilPoolsFilled( + client: MongoClient, + signal: AbortSignal, + count: number = client.s.options.maxPoolSize +): Promise { + let connectionCount = 0; + + async function wait$() { + for await (const _event of on(client, 'connectionCreated', { signal })) { + connectionCount++; + if (connectionCount >= count) { + break; + } + } + } + + await Promise.all([wait$(), client.connect()]); +} From cb12f64f60b9263f24a4fdc23d233fe8505e0ff3 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 7 Oct 2024 10:26:20 -0400 Subject: [PATCH 060/136] lint fix --- .../client_side_operations_timeout.prose.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 6f33af7141b..297d60e6c7c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -17,9 +17,9 @@ import { MongoOperationTimeoutError, MongoServerSelectionError, now, - squashError ObjectId, - promiseWithResolvers + promiseWithResolvers, + squashError } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; From 9a1b2d0a6d36c2a92977efd708798aef56bb82bf Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 7 Oct 2024 13:07:46 -0400 Subject: [PATCH 061/136] feat(NODE-6305): Add CSOT support to tailable cursors (#4218) Co-authored-by: Neal Beeken --- src/cursor/abstract_cursor.ts | 51 +++- src/cursor/run_command_cursor.ts | 2 + src/mongo_client.ts | 5 + src/operations/create_collection.ts | 1 + 
test/benchmarks/driverBench/common.js | 4 +- ...ient_side_operations_timeout.prose.test.ts | 40 ++-- ...lient_side_operations_timeout.spec.test.ts | 7 +- .../node_csot.test.ts | 221 +++++++++++++++++- .../tailable-awaitData.json | 146 ++++++++++++ .../tailable-non-awaitData.json | 151 ++++++++++++ test/tools/unified-spec-runner/operations.ts | 45 +++- 11 files changed, 641 insertions(+), 32 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index f7e488d24b2..255a977a5f9 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -209,12 +209,35 @@ export abstract class AbstractCursor< options.readPreference && options.readPreference instanceof ReadPreference ? options.readPreference : ReadPreference.primary, - ...pluckBSONSerializeOptions(options) + ...pluckBSONSerializeOptions(options), + timeoutMS: options.timeoutMS, + tailable: options.tailable, + awaitData: options.awaitData }; - this.cursorOptions.timeoutMS = options.timeoutMS; if (this.cursorOptions.timeoutMS != null) { - if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { - throw new MongoInvalidArgumentError("Cannot set tailable cursor's timeoutMode to LIFETIME"); + if (options.timeoutMode == null) { + if (options.tailable) { + this.cursorOptions.timeoutMode = CursorTimeoutMode.ITERATION; + + if (options.awaitData) { + if ( + options.maxAwaitTimeMS != null && + options.maxAwaitTimeMS >= this.cursorOptions.timeoutMS + ) + throw new MongoInvalidArgumentError( + 'Cannot specify maxAwaitTimeMS >= timeoutMS for a tailable awaitData cursor' + ); + } + } else { + this.cursorOptions.timeoutMode = CursorTimeoutMode.LIFETIME; + } + } else { + if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { + throw new MongoInvalidArgumentError( + "Cannot set tailable cursor's timeoutMode to LIFETIME" + ); + } + this.cursorOptions.timeoutMode = options.timeoutMode; } this.cursorOptions.timeoutMode = options.timeoutMode ?? @@ -223,6 +246,8 @@ export abstract class AbstractCursor< if (options.timeoutMode != null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); } + + // Set for initial command this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null && ((this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && @@ -781,15 +806,17 @@ export abstract class AbstractCursor< 'Unexpected null selectedServer. 
A cursor creating command should have set this' ); } + const getMoreOptions = { + ...this.cursorOptions, + session: this.cursorSession, + batchSize + }; + const getMoreOperation = new GetMoreOperation( this.cursorNamespace, this.cursorId, this.selectedServer, - { - ...this.cursorOptions, - session: this.cursorSession, - batchSize - } + getMoreOptions ); return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); @@ -814,6 +841,8 @@ export abstract class AbstractCursor< } try { const state = await this._initialize(this.cursorSession); + // Set omitMaxTimeMS to the value needed for subsequent getMore calls + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; const response = state.response; this.selectedServer = state.server; this.cursorId = response.id; @@ -866,9 +895,9 @@ export abstract class AbstractCursor< } catch (error) { try { await this.cleanup(undefined, error); - } catch (error) { + } catch (cleanupError) { // `cleanupCursor` should never throw, squash and throw the original error - squashError(error); + squashError(cleanupError); } throw error; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 6b31ce2263a..90e4a94fd42 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -23,6 +23,8 @@ export type RunCursorCommandOptions = { timeoutMS?: number; /** @internal */ timeoutMode?: CursorTimeoutMode; + tailable?: boolean; + awaitData?: boolean; } & BSONSerializeOptions; /** @public */ diff --git a/src/mongo_client.ts b/src/mongo_client.ts index 092e9418b3a..9348a2300e0 100644 --- a/src/mongo_client.ts +++ b/src/mongo_client.ts @@ -483,6 +483,11 @@ export class MongoClient extends TypedEventEmitter implements return this.s.bsonOptions; } + /** @internal */ + get timeoutMS(): number | undefined { + return this.options.timeoutMS; + } + /** * Executes a client bulk write operation, available on server 8.0+. * @param models - The client bulk write models. 
diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index afb2680b9a0..293ecc8be52 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -17,6 +17,7 @@ import { Aspect, defineAspects } from './operation'; const ILLEGAL_COMMAND_FIELDS = new Set([ 'w', 'wtimeout', + 'timeoutMS', 'j', 'fsync', 'autoIndexId', diff --git a/test/benchmarks/driverBench/common.js b/test/benchmarks/driverBench/common.js index bb5b48babfd..3ffd309572a 100644 --- a/test/benchmarks/driverBench/common.js +++ b/test/benchmarks/driverBench/common.js @@ -24,7 +24,9 @@ function loadSpecString(filePath) { } function makeClient() { - this.client = new MongoClient(process.env.MONGODB_URI || 'mongodb://127.0.0.1:27017'); + this.client = new MongoClient(process.env.MONGODB_URI || 'mongodb://127.0.0.1:27017', { + timeoutMS: 0 + }); } function connectClient() { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1b8c34633b4..09b95d6dff0 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -77,7 +77,7 @@ describe('CSOT spec prose tests', function () { beforeEach(async function () { await internalClient .db('db') - .collection('coll') + .collection('bulkWriteTest') .drop() .catch(() => null); await internalClient.db('admin').command(failpoint); @@ -93,7 +93,7 @@ describe('CSOT spec prose tests', function () { const oneMBDocs = Array.from({ length: 50 }, (_, _id) => ({ _id, a })); const error = await client .db('db') - .collection<{ _id: number; a: Uint8Array }>('coll') + .collection<{ _id: number; a: Uint8Array }>('bulkWriteTest') .insertMany(oneMBDocs) .catch(error => error); @@ -265,6 +265,7 @@ describe('CSOT spec prose tests', function () { }); context('5. Blocking Iteration Methods', () => { + const metadata = { requires: { mongodb: '>=4.4' } }; /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -276,7 +277,7 @@ describe('CSOT spec prose tests', function () { data: { failCommands: ['getMore'], blockConnection: true, - blockTimeMS: 20 + blockTimeMS: 90 } }; let internalClient: MongoClient; @@ -286,7 +287,11 @@ describe('CSOT spec prose tests', function () { beforeEach(async function () { internalClient = this.configuration.newClient(); - await internalClient.db('db').dropCollection('coll'); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); // Creating capped collection to be able to create tailable find cursor const coll = await internalClient .db('db') @@ -294,7 +299,13 @@ describe('CSOT spec prose tests', function () { await coll.insertOne({ x: 1 }); await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + client = this.configuration.newClient(undefined, { + monitorCommands: true, + timeoutMS: 100, + minPoolSize: 20 + }); + await client.connect(); + commandStarted = []; commandSucceeded = []; @@ -337,11 +348,11 @@ describe('CSOT spec prose tests', function () { * 1. 
Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ - it.skip('send correct number of finds and getMores', async function () { + it('send correct number of finds and getMores', metadata, async function () { const cursor = client .db('db') .collection('coll') - .find({}, { tailable: true, awaitData: true }) + .find({}, { tailable: true }) .project({ _id: 0 }); const doc = await cursor.next(); expect(doc).to.deep.equal({ x: 1 }); @@ -358,7 +369,7 @@ describe('CSOT spec prose tests', function () { expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); // Expect 2 getMore expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); - }).skipReason = 'TODO(NODE-6305)'; + }); }); context('Change Streams', () => { @@ -383,8 +394,11 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ - it.skip('sends correct number of aggregate and getMores', async function () { - const changeStream = client.db('db').collection('coll').watch(); + it.skip('sends correct number of aggregate and getMores', metadata, async function () { + const changeStream = client + .db('db') + .collection('coll') + .watch([], { timeoutMS: 20, maxAwaitTimeMS: 19 }); const maybeError = await changeStream.next().then( () => null, e => e @@ -397,9 +411,9 @@ describe('CSOT spec prose tests', function () { const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); // Expect 1 aggregate expect(aggregates).to.have.lengthOf(1); - // Expect 1 getMore - expect(getMores).to.have.lengthOf(1); - }).skipReason = 'TODO(NODE-6305)'; + // Expect 2 getMores + expect(getMores).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6387)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 49ddabc924b..d72e9bc5ebe 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -25,7 +25,12 @@ const skippedTests = { 'Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset': 'TODO(DRIVERS-2965)', 'maxTimeMS value in the command is less than timeoutMS': - 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(DRIVERS-2965)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(DRIVERS-2965)', + 'timeoutMS is refreshed for getMore - failure': + 'TODO(DRIVERS-2965): see modified test in unified-csot-node-specs' // Skipping for both tailable awaitData and tailable non-awaitData cursors }; describe('CSOT spec tests', function () { diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index f4cfc7d882c..b1516454cc7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -31,13 +31,18 @@ import { type FailPoint, 
waitUntilPoolsFilled } from '../../tools/utils'; const metadata = { requires: { mongodb: '>=4.4' } }; describe('CSOT driver tests', metadata, () => { + // NOTE: minPoolSize here is set to ensure that connections are available when testing timeout + // behaviour. This reduces flakiness in our tests since operations will not spend time + // establishing connections, more closely mirroring long-running application behaviour + const minPoolSize = 20; + describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; let coll: Collection; beforeEach(async function () { - client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + client = this.configuration.newClient(undefined, { timeoutMS: 100, minPoolSize }); db = client.db('test', { timeoutMS: 200 }); }); @@ -159,7 +164,10 @@ describe('CSOT driver tests', metadata, () => { metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; - client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + client = this.configuration.newClient(undefined, { + timeoutMS: 1, + monitorCommands: true + }); client.on('commandStarted', ev => commandsStarted.push(ev)); @@ -591,6 +599,211 @@ describe('CSOT driver tests', metadata, () => { }); }); + describe('Tailable cursors', function () { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['aggregate', 'find', 'getMore'], + blockConnection: true, + blockTimeMS: 100 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + + await internalClient.db('db').createCollection('coll', { capped: true, size: 1_000_000 }); + + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 100 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize }); + commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client.connect(); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('when in ITERATION mode', function () { + context('awaitData cursors', function () { + let cursor: FindCursor; + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, awaitData: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, 
tailable: true, awaitData: true, batchSize: 1 }); + for (let i = 0; i < 5; i++) { + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + } + }); + + it('does not use timeoutMS to compute maxTimeMS for getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 10_000, tailable: true, awaitData: true, batchSize: 1 }); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.not.haveOwnProperty('maxTimeMS'); + }); + + context('when maxAwaitTimeMS is specified', function () { + it( + 'sets maxTimeMS to the configured maxAwaitTimeMS value on getMores', + metadata, + async function () { + cursor = client.db('db').collection('coll').find( + {}, + { + timeoutMS: 10_000, + tailable: true, + awaitData: true, + batchSize: 1, + maxAwaitTimeMS: 100 + } + ); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.haveOwnProperty('maxTimeMS'); + expect(getMore.maxTimeMS).to.equal(100); + } + ); + }); + }); + + context('non-awaitData cursors', function () { + let cursor: FindCursor; + + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, tailable: true, batchSize: 1 }); + for (let i = 0; i < 5; i++) { + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + } + }); + + it('does not append a maxTimeMS field to original command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + + const finds = commandStarted.filter(x => x.command.find != null); + expect(finds).to.have.lengthOf(1); + expect(finds[0].command.find).to.exist; + expect(finds[0].command.maxTimeMS).to.not.exist; + }); + it('does not append a maxTimeMS field to subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted.filter(x => x.command.getMore != null); + + expect(getMores).to.have.lengthOf(1); + expect(getMores[0].command.getMore).to.exist; + expect(getMores[0].command.getMore.maxTimeMS).to.not.exist; + }); + }); + }); + }); + describe('GridFSBucket', () => { const blockTimeMS = 200; let internalClient: MongoClient; @@ -798,6 
+1011,10 @@ describe('CSOT driver tests', metadata, () => { beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 123 }); + await client + .db('db') + .dropCollection('coll') + .catch(() => null); }); afterEach(async function () { diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json new file mode 100644 index 00000000000..17da3e3c0c9 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json @@ -0,0 +1,146 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json new file mode 100644 index 00000000000..80cf74a1116 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json @@ -0,0 +1,151 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", 
+ "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index a9f79842c31..f7c34a70239 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -268,7 +268,18 @@ operations.set('createCollection', async ({ entities, operation }) => { operations.set('createFindCursor', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.find(filter, opts); // The spec dictates that we create the cursor and force the find command // to execute, but don't move the cursor forward. 
hasNext() accomplishes @@ -332,7 +343,18 @@ operations.set('find', async ({ entities, operation }) => { } else { queryable = entities.getEntity('collection', operation.object); } - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } return queryable.find(filter, opts).toArray(); }); @@ -804,10 +826,25 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc operations.set('createCommandCursor', async ({ entities, operation }: OperationFunctionParams) => { const collection = entities.getEntity('db', operation.object); - const { command, ...opts } = operation.arguments!; + const { command, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + tailable: opts.tailable, + awaitData: opts.awaitData, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); From 842b52a87b79392d26b4dc385af1900ed0387e9c Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Fri, 4 Oct 2024 14:06:28 -0400 Subject: [PATCH 062/136] lint fix temp temp ready --- .../client_side_encryption.test.ts | 98 +++++++++++++++++++ ...ient_side_operations_timeout.prose.test.ts | 4 +- ...lient_side_operations_timeout.unit.test.ts | 5 +- test/tools/runner/hooks/leak_checker.ts | 2 +- .../auto_encrypter.test.ts | 69 +------------ 5 files changed, 103 insertions(+), 75 deletions(-) create mode 100644 test/integration/client-side-encryption/client_side_encryption.test.ts diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts new file mode 100644 index 00000000000..1291ffec12e --- /dev/null +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -0,0 +1,98 @@ +import { expect } from 'chai'; +import * as sinon from 'sinon'; +import { setTimeout } from 'timers'; +import { TLSSocket } from 'tls'; +import { promisify } from 'util'; + +import { MongoClient } from '../../mongodb'; +import { getEncryptExtraOptions } from '../../tools/utils'; +import { createTimerSandbox } from '../../unit/timer_sandbox'; + +describe('Auto Encryption (Integration)', function () { + describe.skip('CSOT', function () { + let client; + let clock; + let timerSandbox; + let sleep; + + const getKmsProviders = () => { + const my_key = Buffer.from( + 'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk', + 'base64' + ); + return { local: { key: my_key } }; + }; + const keyVaultNamespace = 'keyvault.datakeys'; + + afterEach(async function () { + await client?.close(); + }); + + context('when client is provided timeoutContext', function () { + it('should time out command sent through after timeoutMS', async function () { + client = new MongoClient('mongodb://localhost:27017', { + autoEncryption: { + keyVaultNamespace, + kmsProviders: getKmsProviders(), + extraOptions: 
getEncryptExtraOptions() + }, + timeoutMS: 10000 + }); + await client.connect(); + + const err$ = await client + .db('test') + .command({ ping: 1 }) + .catch(e => e); + const err = err$; + console.log(err); + }); + }); + + context('when client is not provided timeoutContext', function () { + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { + clock.tick(30000); + }); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + }); + + afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } + sinon.restore(); + }); + + it('should not timeout the command sent through autoEncryption after timeoutMS', async function () { + client = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace, + kmsProviders: getKmsProviders(), + extraOptions: getEncryptExtraOptions() + } + } + ); + + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([ + client.db('test').collection('test').insert({ a: 1 }), + sleepingFn() + ]).catch(e => e); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); + }); + }); + }); +}); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 6f33af7141b..297d60e6c7c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -17,9 +17,9 @@ import { MongoOperationTimeoutError, MongoServerSelectionError, now, - squashError ObjectId, - promiseWithResolvers + promiseWithResolvers, + squashError } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 1ed9a1ca3bb..80f560ac617 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -219,7 +219,7 @@ describe('CSOT spec unit tests', function () { }); }); - context('when client is not provided timeoutMS`', function () { + context('when client is not provided timeoutMS', function () { it('should pass timeoutMS into commands sent to mongocryptd', async function () { client = this.configuration.newClient( {}, @@ -241,9 +241,6 @@ describe('CSOT spec unit tests', function () { }); }); }); - - // TODO(NODE-6390): Add timeoutMS support to Auto Encryption - it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => {}); }); context.skip('Background Connection Pooling', function () { diff --git a/test/tools/runner/hooks/leak_checker.ts b/test/tools/runner/hooks/leak_checker.ts index 4f53c031dab..8665af5e1cb 100644 --- a/test/tools/runner/hooks/leak_checker.ts +++ b/test/tools/runner/hooks/leak_checker.ts @@ -140,7 +140,7 @@ const leakCheckerAfterEach = async function () { } }; -const TRACE_SOCKETS = process.env.TRACE_SOCKETS === 'true' ? true : false; +const TRACE_SOCKETS = true; // process.env.TRACE_SOCKETS === 'true' ? 
true : false; const kSocketId = Symbol('socketId'); const originalCreateConnection = net.createConnection; let socketCounter = 0n; diff --git a/test/unit/client-side-encryption/auto_encrypter.test.ts b/test/unit/client-side-encryption/auto_encrypter.test.ts index 3f0e3386031..1e13c0b07c5 100644 --- a/test/unit/client-side-encryption/auto_encrypter.test.ts +++ b/test/unit/client-side-encryption/auto_encrypter.test.ts @@ -11,8 +11,7 @@ import { MongocryptdManager } from '../../../src/client-side-encryption/mongocry import { StateMachine } from '../../../src/client-side-encryption/state_machine'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { MongoClient } from '../../../src/mongo_client'; -import { BSON, CSOTTimeoutContext, type DataKey } from '../../mongodb'; -import { sleep } from '../../tools/utils'; +import { BSON, type DataKey } from '../../mongodb'; import * as requirements from './requirements.helper'; const bson = BSON; @@ -375,70 +374,4 @@ describe('AutoEncrypter', function () { it('should provide the libmongocrypt version', function () { expect(AutoEncrypter.libmongocryptVersion).to.be.a('string'); }); - - describe('CSOT', function () { - let autoEncrypter: AutoEncrypter; - let stateMachineSpy; - let client; - - beforeEach(async function () { - client = new MockClient() as MongoClient; - autoEncrypter = new AutoEncrypter(client, { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - }); - await autoEncrypter.init(); - stateMachineSpy = sinon.spy(StateMachine.prototype, 'execute'); - }); - - afterEach(async function () { - sinon.restore(); - }); - - describe('#encrypt', function () { - context('when encrypt is provided a timeoutContext', function () { - it('should call stateMachine.execute with a timeoutMS', async function () { - const timeoutContext = new CSOTTimeoutContext({ - timeoutMS: 500, - serverSelectionTimeoutMS: 30000 - }); - await sleep(300); - await autoEncrypter.encrypt('test.test', { hello: 1 }, { timeoutContext }); - expect(stateMachineSpy.getCalls()[0].args[2]).to.not.be.undefined; - expect(stateMachineSpy.getCalls()[0].args[2].remainingTimeMS).to.be.lessThanOrEqual(200); - }); - }); - context('when encrypt is not provided a timeoutContext', function () { - it('should call stateMachine.execute without a timeoutMS', async function () { - await autoEncrypter.encrypt('test.test', { hello: 1 }); - expect(stateMachineSpy.getCalls()[0].args[2]).to.be.undefined; - }); - }); - }); - - describe('#decrypt', function () { - context('when decrypt is provided a timeoutContext', function () { - it('should respect remainingTimeMS', async function () { - const timeoutContext = new CSOTTimeoutContext({ - timeoutMS: 500, - serverSelectionTimeoutMS: 30000 - }); - await sleep(300); - await autoEncrypter.decrypt(BSON.serialize({ ok: 1 }), { timeoutContext }); - expect(stateMachineSpy.getCalls()[0].args[2]).to.not.be.undefined; - expect(stateMachineSpy.getCalls()[0].args[2].remainingTimeMS).to.be.lessThanOrEqual(200); - }); - }); - - context('when decrypt is not provided a timeoutContext', function () { - it('should call stateMachine.execute without a timeoutMS', async function () { - await autoEncrypter.decrypt(BSON.serialize({ ok: 1 })); - expect(stateMachineSpy.getCalls()[0].args[2]).to.be.undefined; - }); - }); - }); - }); }); From d26a588a70d7ff22f74a736668ec6d72aaf04ed2 Mon Sep 17 00:00:00 2001 From: Aditi Khare 
<106987683+aditi-khare-mongoDB@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:37:08 -0400 Subject: [PATCH 063/136] feat(NODE-6389): add support for timeoutMS in StateMachine.execute() (#4243) Co-authored-by: Warren James Co-authored-by: Neal Beeken Co-authored-by: Bailey Pearson --- src/client-side-encryption/state_machine.ts | 88 +++++++---- src/sdam/server.ts | 4 + ...ient_side_operations_timeout.prose.test.ts | 87 +++++++++-- ...lient_side_operations_timeout.unit.test.ts | 104 +++++++++++-- .../state_machine.test.ts | 143 +++++++++++++++++- 5 files changed, 371 insertions(+), 55 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index af3ea4c215d..f47ee191b54 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -12,7 +12,9 @@ import { } from '../bson'; import { type ProxyOptions } from '../cmap/connection'; import { getSocks, type SocksLib } from '../deps'; +import { MongoOperationTimeoutError } from '../error'; import { type MongoClient, type MongoClientOptions } from '../mongo_client'; +import { Timeout, type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, MongoDBCollectionNamespace, promiseWithResolvers } from '../utils'; import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; @@ -173,6 +175,7 @@ export type StateMachineOptions = { * An internal class that executes across a MongoCryptContext until either * a finishing state or an error is reached. Do not instantiate directly. */ +// TODO(DRIVERS-2671): clarify CSOT behavior for FLE APIs export class StateMachine { constructor( private options: StateMachineOptions, @@ -182,7 +185,11 @@ export class StateMachine { /** * Executes the state machine according to the specification */ - async execute(executor: StateMachineExecutable, context: MongoCryptContext): Promise { + async execute( + executor: StateMachineExecutable, + context: MongoCryptContext, + timeoutContext?: TimeoutContext + ): Promise { const keyVaultNamespace = executor._keyVaultNamespace; const keyVaultClient = executor._keyVaultClient; const metaDataClient = executor._metaDataClient; @@ -201,8 +208,13 @@ export class StateMachine { 'unreachable state machine state: entered MONGOCRYPT_CTX_NEED_MONGO_COLLINFO but metadata client is undefined' ); } - const collInfo = await this.fetchCollectionInfo(metaDataClient, context.ns, filter); + const collInfo = await this.fetchCollectionInfo( + metaDataClient, + context.ns, + filter, + timeoutContext + ); if (collInfo) { context.addMongoOperationResponse(collInfo); } @@ -222,9 +234,9 @@ export class StateMachine { // When we are using the shared library, we don't have a mongocryptd manager. const markedCommand: Uint8Array = mongocryptdManager ? 
await mongocryptdManager.withRespawn( - this.markCommand.bind(this, mongocryptdClient, context.ns, command) + this.markCommand.bind(this, mongocryptdClient, context.ns, command, timeoutContext) ) - : await this.markCommand(mongocryptdClient, context.ns, command); + : await this.markCommand(mongocryptdClient, context.ns, command, timeoutContext); context.addMongoOperationResponse(markedCommand); context.finishMongoOperation(); @@ -233,7 +245,12 @@ export class StateMachine { case MONGOCRYPT_CTX_NEED_MONGO_KEYS: { const filter = context.nextMongoOperation(); - const keys = await this.fetchKeys(keyVaultClient, keyVaultNamespace, filter); + const keys = await this.fetchKeys( + keyVaultClient, + keyVaultNamespace, + filter, + timeoutContext + ); if (keys.length === 0) { // See docs on EMPTY_V @@ -255,9 +272,7 @@ export class StateMachine { } case MONGOCRYPT_CTX_NEED_KMS: { - const requests = Array.from(this.requests(context)); - await Promise.all(requests); - + await Promise.all(this.requests(context, timeoutContext)); context.finishKMSRequests(); break; } @@ -299,7 +314,7 @@ export class StateMachine { * @param kmsContext - A C++ KMS context returned from the bindings * @returns A promise that resolves when the KMS reply has be fully parsed */ - async kmsRequest(request: MongoCryptKMSRequest): Promise { + async kmsRequest(request: MongoCryptKMSRequest, timeoutContext?: TimeoutContext): Promise { const parsedUrl = request.endpoint.split(':'); const port = parsedUrl[1] != null ? Number.parseInt(parsedUrl[1], 10) : HTTPS_PORT; const socketOptions = autoSelectSocketOptions(this.options.socketOptions || {}); @@ -329,10 +344,6 @@ export class StateMachine { } } - function ontimeout() { - return new MongoCryptError('KMS request timed out'); - } - function onerror(cause: Error) { return new MongoCryptError('KMS request failed', { cause }); } @@ -364,7 +375,6 @@ export class StateMachine { resolve: resolveOnNetSocketConnect } = promiseWithResolvers(); netSocket - .once('timeout', () => rejectOnNetSocketError(ontimeout())) .once('error', err => rejectOnNetSocketError(onerror(err))) .once('close', () => rejectOnNetSocketError(onclose())) .once('connect', () => resolveOnNetSocketConnect()); @@ -410,8 +420,8 @@ export class StateMachine { reject: rejectOnTlsSocketError, resolve } = promiseWithResolvers(); + socket - .once('timeout', () => rejectOnTlsSocketError(ontimeout())) .once('error', err => rejectOnTlsSocketError(onerror(err))) .once('close', () => rejectOnTlsSocketError(onclose())) .on('data', data => { @@ -425,20 +435,26 @@ export class StateMachine { resolve(); } }); - await willResolveKmsRequest; + await (timeoutContext?.csotEnabled() + ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) + : willResolveKmsRequest); + } catch (error) { + if (error instanceof TimeoutError) + throw new MongoOperationTimeoutError('KMS request timed out'); + throw error; } finally { // There's no need for any more activity on this socket at this point. 
destroySockets(); } } - *requests(context: MongoCryptContext) { + *requests(context: MongoCryptContext, timeoutContext?: TimeoutContext) { for ( let request = context.nextKMSRequest(); request != null; request = context.nextKMSRequest() ) { - yield this.kmsRequest(request); + yield this.kmsRequest(request, timeoutContext); } } @@ -498,7 +514,8 @@ export class StateMachine { async fetchCollectionInfo( client: MongoClient, ns: string, - filter: Document + filter: Document, + timeoutContext?: TimeoutContext ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); @@ -506,7 +523,10 @@ export class StateMachine { .db(db) .listCollections(filter, { promoteLongs: false, - promoteValues: false + promoteValues: false, + ...(timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {}) }) .toArray(); @@ -522,12 +542,22 @@ export class StateMachine { * @param command - The command to execute. * @param callback - Invoked with the serialized and marked bson command, or with an error */ - async markCommand(client: MongoClient, ns: string, command: Uint8Array): Promise { - const options = { promoteLongs: false, promoteValues: false }; + async markCommand( + client: MongoClient, + ns: string, + command: Uint8Array, + timeoutContext?: TimeoutContext + ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - const rawCommand = deserialize(command, options); + const bsonOptions = { promoteLongs: false, promoteValues: false }; + const rawCommand = deserialize(command, bsonOptions); - const response = await client.db(db).command(rawCommand, options); + const response = await client.db(db).command(rawCommand, { + ...bsonOptions, + ...(timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS } + : undefined) + }); return serialize(response, this.bsonOptions); } @@ -543,7 +573,8 @@ export class StateMachine { fetchKeys( client: MongoClient, keyVaultNamespace: string, - filter: Uint8Array + filter: Uint8Array, + timeoutContext?: TimeoutContext ): Promise> { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); @@ -551,7 +582,12 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter)) + .find( + deserialize(filter), + timeoutContext?.csotEnabled() + ? 
{ timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {} + ) .toArray(); } } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 08325086d53..7ab2d9a043f 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,6 +311,10 @@ export class Server extends TypedEventEmitter { delete finalOptions.readPreference; } + if (this.description.iscryptd) { + finalOptions.omitMaxTimeMS = true; + } + const session = finalOptions.session; let conn = session?.pinnedConnection; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 09b95d6dff0..80da92e10a3 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,5 +1,7 @@ /* Specification prose tests */ +import { type ChildProcess, spawn } from 'node:child_process'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -16,7 +18,8 @@ import { MongoServerSelectionError, now, ObjectId, - promiseWithResolvers + promiseWithResolvers, + squashError } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -103,17 +106,55 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { - /** - * This test MUST only be run against enterprise server versions 4.2 and higher. - * - * 1. Launch a mongocryptd process on 23000. - * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. - * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. - * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. - */ - }); + context( + '2. maxTimeMS is not set for commands sent to mongocryptd', + { requires: { mongodb: '>=4.2' } }, + () => { + /** + * This test MUST only be run against enterprise server versions 4.2 and higher. + * + * 1. Launch a mongocryptd process on 23000. + * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. + * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. + * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. 
+ */ + + let client: MongoClient; + const mongocryptdTestPort = '23000'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}/?timeoutMS=1000`, { + monitorCommands: true + }); + }); + + afterEach(async function () { + await client.close(); + childProcess.kill('SIGKILL'); + sinon.restore(); + }); + + it('maxTimeMS is not set', async function () { + const commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client + .db('admin') + .command({ ping: 1 }) + .catch(e => squashError(e)); + expect(commandStarted).to.have.lengthOf(1); + expect(commandStarted[0].command).to.not.have.property('maxTimeMS'); + }); + } + ); + // TODO(NODE-6391): Add timeoutMS support to Explicit Encryption context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, @@ -720,6 +761,30 @@ describe('CSOT spec prose tests', function () { 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. 
diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 944d9b96048..7387099a7f1 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -6,8 +6,22 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; - -import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; +import { setTimeout } from 'timers'; +import { TLSSocket } from 'tls'; +import { promisify } from 'util'; + +// eslint-disable-next-line @typescript-eslint/no-restricted-imports +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { + ConnectionPool, + CSOTTimeoutContext, + type MongoClient, + MongoOperationTimeoutError, + Timeout, + TimeoutContext, + Topology +} from '../../mongodb'; +import { createTimerSandbox } from '../../unit/timer_sandbox'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -93,17 +107,83 @@ describe('CSOT spec unit tests', function () { }).skipReason = 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; - context.skip('Client side encryption', function () { - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); + describe('Client side encryption', function () { + describe('KMS requests', function () { + const stateMachine = new StateMachine({} as any); + const request = { + addResponse: _response => {}, + status: { + type: 1, + code: 1, + message: 'notARealStatus' + }, + bytesNeeded: 500, + kmsProvider: 'notRealAgain', + endpoint: 'fake', + message: Buffer.from('foobar') + }; + + context('when StateMachine.kmsRequest() is passed a `CSOTimeoutContext`', function () { + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) {}); + }); + + afterEach(async function () { + sinon.restore(); + }); + + it('the kms request times out through remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + const err = await stateMachine.kmsRequest(request, timeoutContext).catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(err.errmsg).to.equal('KMS request timed out'); + }); + }); + + context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + + let sleep; + + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { + clock.tick(30000); + }); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + }); + + afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } + sinon.restore(); + }); + + it('the kms request does not timeout within 30 seconds', async function () { + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch(e => e); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); + }); 
+ }); + }); - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + // TODO(NODE-6390): Add timeoutMS support to Auto Encryption + it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => {}); + }); context.skip('Background Connection Pooling', function () { context( diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index 77f3cf3a824..95bb6056355 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -12,9 +12,17 @@ import * as tls from 'tls'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { Db } from '../../../src/db'; -// eslint-disable-next-line @typescript-eslint/no-restricted-imports -import { MongoClient } from '../../../src/mongo_client'; -import { Int32, Long, serialize } from '../../mongodb'; +import { + BSON, + Collection, + CSOTTimeoutContext, + Int32, + Long, + MongoClient, + serialize, + squashError +} from '../../mongodb'; +import { sleep } from '../../tools/utils'; describe('StateMachine', function () { class MockRequest implements MongoCryptKMSRequest { @@ -74,12 +82,10 @@ describe('StateMachine', function () { const options = { promoteLongs: false, promoteValues: false }; const serializedCommand = serialize(command); const stateMachine = new StateMachine({} as any); - // eslint-disable-next-line @typescript-eslint/no-empty-function - const callback = () => {}; context('when executing the command', function () { it('does not promote values', function () { - stateMachine.markCommand(clientStub, 'test.coll', serializedCommand, callback); + stateMachine.markCommand(clientStub, 'test.coll', serializedCommand); expect(runCommandStub.calledWith(command, options)).to.be.true; }); }); @@ -461,4 +467,129 @@ describe('StateMachine', function () { expect.fail('missed exception'); }); }); + + describe('CSOT', function () { + describe('#fetchKeys', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let findSpy; + + beforeEach(async function () { + findSpy = sinon.spy(Collection.prototype, 'find'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.fetchKeys() is passed a `CSOTimeoutContext`', function () { + it('collection.find runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + }); + + context('when StateMachine.fetchKeys() is not passed a `CSOTimeoutContext`', function () { + it('collection.find runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + 
expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#markCommand', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let dbCommandSpy; + + beforeEach(async function () { + dbCommandSpy = sinon.spy(Db.prototype, 'command'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.markCommand() is passed a `CSOTimeoutContext`', function () { + it('db.command runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + }); + + context('when StateMachine.markCommand() is not passed a `CSOTimeoutContext`', function () { + it('db.command runs with an undefined timeoutMS property', async function () { + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#fetchCollectionInfo', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let listCollectionsSpy; + + beforeEach(async function () { + listCollectionsSpy = sinon.spy(Db.prototype, 'listCollections'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context( + 'when StateMachine.fetchCollectionInfo() is passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + } + ); + + context( + 'when StateMachine.fetchCollectionInfo() is not passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + } + ); + }); + }); }); From 8013fe59176b70eb460a76e9dc14aa0833b32724 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 7 Oct 2024 15:36:02 -0400 Subject: [PATCH 064/136] ready for rebase --- .../client_side_encryption.test.ts | 151 ++++++++++-------- .../collection-management/collection.test.ts | 5 +- test/tools/runner/hooks/leak_checker.ts | 2 +- 3 files changed, 85 insertions(+), 73 deletions(-) diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index 1291ffec12e..3e1e7ff7b04 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts 
+++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -1,98 +1,107 @@ import { expect } from 'chai'; -import * as sinon from 'sinon'; -import { setTimeout } from 'timers'; -import { TLSSocket } from 'tls'; -import { promisify } from 'util'; -import { MongoClient } from '../../mongodb'; -import { getEncryptExtraOptions } from '../../tools/utils'; -import { createTimerSandbox } from '../../unit/timer_sandbox'; +import { MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; describe('Auto Encryption (Integration)', function () { - describe.skip('CSOT', function () { - let client; - let clock; - let timerSandbox; - let sleep; + describe('CSOT', function () { + let setupClient; - const getKmsProviders = () => { - const my_key = Buffer.from( - 'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk', - 'base64' - ); - return { local: { key: my_key } }; - }; - const keyVaultNamespace = 'keyvault.datakeys'; + beforeEach(async function () { + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['aggregate'], + errorCode: 89 + } + } as FailPoint); + }); afterEach(async function () { - await client?.close(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['aggregate'], + errorCode: 89 + } + } as FailPoint); + await setupClient.close(); }); - context('when client is provided timeoutContext', function () { - it('should time out command sent through after timeoutMS', async function () { - client = new MongoClient('mongodb://localhost:27017', { - autoEncryption: { - keyVaultNamespace, - kmsProviders: getKmsProviders(), - extraOptions: getEncryptExtraOptions() - }, - timeoutMS: 10000 - }); - await client.connect(); - - const err$ = await client - .db('test') - .command({ ping: 1 }) - .catch(e => e); - const err = err$; - console.log(err); - }); - }); + context('when client is provided timeoutMS and command hangs', function () { + let encryptedClient; - context('when client is not provided timeoutContext', function () { beforeEach(async function () { - sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { - clock.tick(30000); - }); - timerSandbox = createTimerSandbox(); - clock = sinon.useFakeTimers(); - sleep = promisify(setTimeout); + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + }, + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); }); afterEach(async function () { - if (clock) { - timerSandbox.restore(); - clock.restore(); - clock = undefined; - } - sinon.restore(); + await encryptedClient.close(); + }); + + it('the command should fail due to a timeout error', async function () { + const err = await encryptedClient + .db('test') + .collection('test') + .aggregate([]) + .toArray() + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); }); + }); - it('should not timeout the command sent through autoEncryption after timeoutMS', async function () { - client = this.configuration.newClient( + context('when client is not provided timeoutMS and command hangs', function () { + let 
encryptedClient; + beforeEach(async function () { + encryptedClient = this.configuration.newClient( {}, { autoEncryption: { - keyVaultNamespace, - kmsProviders: getKmsProviders(), - extraOptions: getEncryptExtraOptions() + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } } } ); + }); - const sleepingFn = async () => { - await sleep(30000); - throw Error('Slept for 30s'); - }; + afterEach(async function () { + encryptedClient.close(); + }); - const err$ = Promise.all([ - client.db('test').collection('test').insert({ a: 1 }), - sleepingFn() - ]).catch(e => e); - clock.tick(30000); - const err = await err$; - expect(err.message).to.equal('Slept for 30s'); + it('the command should fail due to a server error', async function () { + const err = await encryptedClient + .db('test') + .collection('test') + .aggregate([]) + .toArray() + .catch(e => e); + expect(err).to.be.instanceOf(MongoServerError); }); }); }); -}); +}); \ No newline at end of file diff --git a/test/integration/collection-management/collection.test.ts b/test/integration/collection-management/collection.test.ts index 809b4697dea..1d4f2b7b2e6 100644 --- a/test/integration/collection-management/collection.test.ts +++ b/test/integration/collection-management/collection.test.ts @@ -493,7 +493,10 @@ describe('Collection', function () { .command({ configureFailPoint: 'failCommand', mode: 'off', - data: { failCommands: ['aggregate'] } + data: { + failCommands: ['aggregate'], + blockTimeMS: 10000 + } } as FailPoint); }); diff --git a/test/tools/runner/hooks/leak_checker.ts b/test/tools/runner/hooks/leak_checker.ts index 8665af5e1cb..4f53c031dab 100644 --- a/test/tools/runner/hooks/leak_checker.ts +++ b/test/tools/runner/hooks/leak_checker.ts @@ -140,7 +140,7 @@ const leakCheckerAfterEach = async function () { } }; -const TRACE_SOCKETS = true; // process.env.TRACE_SOCKETS === 'true' ? true : false; +const TRACE_SOCKETS = process.env.TRACE_SOCKETS === 'true' ? 
true : false; const kSocketId = Symbol('socketId'); const originalCreateConnection = net.createConnection; let socketCounter = 0n; From aab8e3305b2105b95b904e3167c0ed5ee4aaba6d Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 7 Oct 2024 17:21:24 -0400 Subject: [PATCH 065/136] ready for review --- .../client_side_encryption.test.ts | 405 ++++++++++++++---- 1 file changed, 321 insertions(+), 84 deletions(-) diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index 3e1e7ff7b04..9529a9f809b 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -1,107 +1,344 @@ import { expect } from 'chai'; -import { MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; +// eslint-disable-next-line @typescript-eslint/no-restricted-imports +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { + BSON, + CSOTTimeoutContext, + MongoOperationTimeoutError, + MongoServerError +} from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; -describe('Auto Encryption (Integration)', function () { +describe('Client-Side Encryption (Integration)', function () { describe('CSOT', function () { - let setupClient; - - beforeEach(async function () { - setupClient = this.configuration.newClient(); - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'alwaysOn', - data: { - failCommands: ['aggregate'], - errorCode: 89 - } - } as FailPoint); - }); - - afterEach(async function () { - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'off', - data: { - failCommands: ['aggregate'], - errorCode: 89 - } - } as FailPoint); - await setupClient.close(); - }); - - context('when client is provided timeoutMS and command hangs', function () { - let encryptedClient; + describe('Auto encryption', function () { + let setupClient; beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - }, - timeoutMS: 1000 - } - ); - await encryptedClient.connect(); + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['aggregate'], + errorCode: 89 + } + } as FailPoint); }); afterEach(async function () { - await encryptedClient.close(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['aggregate'], + errorCode: 89 + } + } as FailPoint); + await setupClient.close(); }); - it('the command should fail due to a timeout error', async function () { - const err = await encryptedClient - .db('test') - .collection('test') - .aggregate([]) - .toArray() - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); + context('when client is provided timeoutMS and command hangs', function () { + let encryptedClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: 
Buffer.alloc(96) } + } + }, + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient.close(); + }); + + it('the command should fail due to a timeout error', async function () { + const err = await encryptedClient + .db('test') + .collection('test') + .aggregate([]) + .toArray() + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + }); }); - }); - context('when client is not provided timeoutMS and command hangs', function () { - let encryptedClient; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } + context('when client is not provided timeoutMS and command hangs', function () { + let encryptedClient; + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } } } - } - ); + ); + }); + + afterEach(async function () { + encryptedClient.close(); + }); + + it('the command should fail due to a server error', async function () { + const err = await encryptedClient + .db('test') + .collection('test') + .aggregate([]) + .toArray() + .catch(e => e); + expect(err).to.be.instanceOf(MongoServerError); + }); }); + }); - afterEach(async function () { - encryptedClient.close(); + describe('State machine', function () { + const stateMachine = new StateMachine({} as any); + + const timeoutContext = () => { + return new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + }; + + describe('#markCommand', function () { + context.skip('when provided timeoutContext and command hangs', function () { + let encryptedClient; + let setupClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + }, + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping'], + errorCode: 89 + } + } as FailPoint); + }); + + afterEach(async function () { + await encryptedClient?.close(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping'], + errorCode: 89 + } + } as FailPoint); + await setupClient.close(); + }); + + it('the command should fail due to a timeout error', async function () { + const err = await stateMachine + .markCommand( + encryptedClient, + 'test.test', + BSON.serialize({ ping: 1 }), + timeoutContext() + ) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + context('when not provided timeoutContext and command hangs', function () { + let encryptedClient; + let setupClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + 
} + } + } + ); + await encryptedClient.connect(); + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping'], + errorCode: 89 + } + } as FailPoint); + }); + + afterEach(async function () { + await encryptedClient?.close(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping'], + errorCode: 89 + } + } as FailPoint); + await setupClient.close(); + }); + + it('the command should fail due to a server error', async function () { + const err = await stateMachine + .markCommand(encryptedClient, 'test.test', BSON.serialize({ ping: 1 })) + .catch(e => e); + expect(err).to.be.instanceOf(MongoServerError); + }); + }); }); - it('the command should fail due to a server error', async function () { - const err = await encryptedClient - .db('test') - .collection('test') - .aggregate([]) - .toArray() - .catch(e => e); - expect(err).to.be.instanceOf(MongoServerError); + describe('#fetchKeys', function () { + let setupClient; + + beforeEach(async function () { + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find'], + errorCode: 89 + } + } as FailPoint); + }); + + afterEach(async function () { + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['find'], + errorCode: 89 + } + } as FailPoint); + await setupClient.close(); + }); + + context('when provided timeoutContext and command hangs', function () { + let encryptedClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + }, + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it('the command should fail due to a timeout error', async function () { + const err = await stateMachine + .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + context('when not provided timeoutContext and command hangs', function () { + let encryptedClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + } + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it('the command should fail due to a server error', async function () { + const err = await stateMachine + .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })) + .catch(e => e); + expect(err).to.be.instanceOf(MongoServerError); + }); + }); }); }); }); -}); \ No newline at end of file +}); From cf8b0d808c6c46e7e75fa936d941afe1e0d399dc Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 7 Oct 2024 17:22:36 -0400 Subject: [PATCH 066/136] remove extra change --- test/integration/collection-management/collection.test.ts | 3 +-- 1 file changed, 1 insertion(+), 2 
deletions(-) diff --git a/test/integration/collection-management/collection.test.ts b/test/integration/collection-management/collection.test.ts index 1d4f2b7b2e6..5414336be7d 100644 --- a/test/integration/collection-management/collection.test.ts +++ b/test/integration/collection-management/collection.test.ts @@ -494,8 +494,7 @@ describe('Collection', function () { configureFailPoint: 'failCommand', mode: 'off', data: { - failCommands: ['aggregate'], - blockTimeMS: 10000 + failCommands: ['aggregate'] } } as FailPoint); }); From 24e0a3561f5611c049761ee9485b22de05289a13 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 7 Oct 2024 17:22:56 -0400 Subject: [PATCH 067/136] remove extra change --- test/integration/collection-management/collection.test.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/integration/collection-management/collection.test.ts b/test/integration/collection-management/collection.test.ts index 5414336be7d..809b4697dea 100644 --- a/test/integration/collection-management/collection.test.ts +++ b/test/integration/collection-management/collection.test.ts @@ -493,9 +493,7 @@ describe('Collection', function () { .command({ configureFailPoint: 'failCommand', mode: 'off', - data: { - failCommands: ['aggregate'] - } + data: { failCommands: ['aggregate'] } } as FailPoint); }); From 6b89760b2cd8b7ab8dab250aaf75b9663d655f98 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 8 Oct 2024 15:53:23 -0400 Subject: [PATCH 068/136] ready for review --- .../client_side_encryption.test.ts | 115 +++++++++--------- ...lient_side_operations_timeout.unit.test.ts | 105 +++++++++++----- 2 files changed, 134 insertions(+), 86 deletions(-) diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index 9529a9f809b..9e94381af25 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -1,14 +1,20 @@ +import { setTimeout } from 'node:timers/promises'; +import { promisify } from 'node:util'; + import { expect } from 'chai'; +import * as sinon from 'sinon'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { BSON, + Connection, CSOTTimeoutContext, MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { type FailPoint, sleep } from '../../tools/utils'; +import { createTimerSandbox } from '../../unit/timer_sandbox'; describe('Client-Side Encryption (Integration)', function () { describe('CSOT', function () { @@ -124,53 +130,45 @@ describe('Client-Side Encryption (Integration)', function () { }; describe('#markCommand', function () { - context.skip('when provided timeoutContext and command hangs', function () { + context('when provided timeoutContext and command hangs', function () { let encryptedClient; - let setupClient; beforeEach(async function () { encryptedClient = this.configuration.newClient( {}, { autoEncryption: { + extraOptions: { + mongocryptdBypassSpawn: true, + mongocryptdURI: 'mongodb://localhost:27017/db?serverSelectionTimeoutMS=1000', + mongocryptdSpawnArgs: [ + '--pidfilepath=bypass-spawning-mongocryptd.pid', + '--port=27017' + ] + }, keyVaultNamespace: 'admin.datakeys', kmsProviders: { aws: { accessKeyId: 'example', secretAccessKey: 'example' }, local: { key: Buffer.alloc(96) 
} } }, - timeoutMS: 1000 + timeoutMS: 500 } ); await encryptedClient.connect(); - setupClient = this.configuration.newClient(); - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'alwaysOn', - data: { - failCommands: ['ping'], - errorCode: 89 - } - } as FailPoint); + + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(1000); + yield* stub.wrappedMethod.call(this, ...args); + }); }); afterEach(async function () { await encryptedClient?.close(); - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'off', - data: { - failCommands: ['ping'], - errorCode: 89 - } - } as FailPoint); - await setupClient.close(); + sinon.restore(); }); it('the command should fail due to a timeout error', async function () { @@ -188,7 +186,9 @@ describe('Client-Side Encryption (Integration)', function () { context('when not provided timeoutContext and command hangs', function () { let encryptedClient; - let setupClient; + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + let sleep; beforeEach(async function () { encryptedClient = this.configuration.newClient( @@ -204,41 +204,40 @@ describe('Client-Side Encryption (Integration)', function () { } ); await encryptedClient.connect(); - setupClient = this.configuration.newClient(); - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'alwaysOn', - data: { - failCommands: ['ping'], - errorCode: 89 - } - } as FailPoint); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(1000); + yield* stub.wrappedMethod.call(this, ...args); + }); }); afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } await encryptedClient?.close(); - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'off', - data: { - failCommands: ['ping'], - errorCode: 89 - } - } as FailPoint); - await setupClient.close(); }); - it('the command should fail due to a server error', async function () { - const err = await stateMachine - .markCommand(encryptedClient, 'test.test', BSON.serialize({ ping: 1 })) - .catch(e => e); - expect(err).to.be.instanceOf(MongoServerError); + it('the command should not fail due to a timeout error within 30 seconds', async function () { + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([ + stateMachine.markCommand(encryptedClient, 'test.test', BSON.serialize({ ping: 1 })), + sleepingFn() + ]).catch(e => e); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 80f560ac617..2755dbb8996 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -13,16 +13,16 @@ import { promisify } from 'util'; // eslint-disable-next-line 
@typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { + Connection, ConnectionPool, - CryptoConnection, CSOTTimeoutContext, type MongoClient, MongoOperationTimeoutError, - squashError, Timeout, TimeoutContext, Topology } from '../../mongodb'; +import { sleep } from '../../tools/utils'; import { createTimerSandbox } from '../../unit/timer_sandbox'; // TODO(NODE-5824): Implement CSOT prose tests @@ -184,44 +184,64 @@ describe('CSOT spec unit tests', function () { }); describe('Auto Encryption', function () { - let client; - let spy; + context('when provided timeoutMS and command hangs', function () { + let encryptedClient; - beforeEach(async function () { - spy = sinon.spy(CryptoConnection.prototype, 'command'); - }); - - afterEach(async function () { - await client?.close(); - sinon.restore(); - }); - - context('when client is provided timeoutMS', function () { - it('should pass timeoutMS into commands sent to mongocryptd', async function () { - client = this.configuration.newClient( + beforeEach(async function () { + encryptedClient = this.configuration.newClient( {}, { autoEncryption: { + extraOptions: { + mongocryptdBypassSpawn: true, + mongocryptdURI: 'mongodb://localhost:27017/db?serverSelectionTimeoutMS=1000', + mongocryptdSpawnArgs: [ + '--pidfilepath=bypass-spawning-mongocryptd.pid', + '--port=27017' + ] + }, keyVaultNamespace: 'admin.datakeys', kmsProviders: { aws: { accessKeyId: 'example', secretAccessKey: 'example' }, local: { key: Buffer.alloc(96) } } }, - timeoutMS: 10000 + timeoutMS: 500 } ); - await client + await encryptedClient.connect(); + + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(1000); + yield* stub.wrappedMethod.call(this, ...args); + }); + }); + + afterEach(async function () { + await encryptedClient?.close(); + sinon.restore(); + }); + + it('the command should fail due to a timeout error', async function () { + const err = await encryptedClient .db() .command({ ping: 1 }) - .catch(e => squashError(e)); - expect(spy.getCalls()[2].args[2].timeoutMS).to.exist; + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); }); }); - context('when client is not provided timeoutMS', function () { - it('should pass timeoutMS into commands sent to mongocryptd', async function () { - client = this.configuration.newClient( + context('when not provided timeoutMS and command hangs', function () { + let encryptedClient; + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + let sleep; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( {}, { autoEncryption: { @@ -233,11 +253,40 @@ describe('CSOT spec unit tests', function () { } } ); - await client - .db() - .command({ ping: 1 }) - .catch(e => squashError(e)); - expect(spy.getCalls()[2].args[2].timeoutMS).to.not.exist; + await encryptedClient.connect(); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(1000); + yield* stub.wrappedMethod.call(this, ...args); + }); + }); + + afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } + await encryptedClient?.close(); + }); + + it('the 
command should not fail due to a timeout error within 30 seconds', async function () { + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([encryptedClient.db().command({ ping: 1 }), sleepingFn()]).catch( + e => e + ); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); }); }); }); From 9c478ab911774c631f06427c488dc770853412e6 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 8 Oct 2024 16:15:12 -0400 Subject: [PATCH 069/136] only >4.2 servers --- .../client-side-encryption/client_side_encryption.test.ts | 2 +- .../client_side_operations_timeout.spec.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index 9e94381af25..4373f92075b 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -18,7 +18,7 @@ import { createTimerSandbox } from '../../unit/timer_sandbox'; describe('Client-Side Encryption (Integration)', function () { describe('CSOT', function () { - describe('Auto encryption', function () { + describe('Auto encryption', { requires: { mongodb: '>=4.2' } }, function () { let setupClient; beforeEach(async function () { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index d72e9bc5ebe..02dfce633d9 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -33,7 +33,7 @@ const skippedTests = { 'TODO(DRIVERS-2965): see modified test in unified-csot-node-specs' // Skipping for both tailable awaitData and tailable non-awaitData cursors }; -describe('CSOT spec tests', function () { +describe.only('CSOT spec tests', function () { const specs = loadSpecTests('client-side-operations-timeout'); for (const spec of specs) { for (const test of spec.tests) { From 1d9034b42fe58a8bac8a4d4b8e8300269f7584ef Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 8 Oct 2024 16:47:30 -0400 Subject: [PATCH 070/136] remove stray only --- .../client_side_operations_timeout.spec.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 02dfce633d9..d72e9bc5ebe 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -33,7 +33,7 @@ const skippedTests = { 'TODO(DRIVERS-2965): see modified test in unified-csot-node-specs' // Skipping for both tailable awaitData and tailable non-awaitData cursors }; -describe.only('CSOT spec tests', function () { +describe('CSOT spec tests', function () { const specs = loadSpecTests('client-side-operations-timeout'); for (const spec of specs) { for (const test of spec.tests) { From a31e7bfb09e64f5f1ced3566a9df70e5fb89e9f1 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 11 Apr 2024 17:15:41 -0400 Subject: [PATCH 071/136] feat(NODE-6090): 
Implement CSOT logic for connection checkout and server selection --- src/admin.ts | 3 +- src/cmap/connection.ts | 4 + src/cmap/connection_pool.ts | 53 ++- src/collection.ts | 5 + src/db.ts | 6 + src/error.ts | 9 + src/index.ts | 1 + src/operations/command.ts | 2 + src/operations/find.ts | 3 +- src/operations/operation.ts | 8 + src/operations/run_command.ts | 9 +- src/sdam/server.ts | 3 +- src/sdam/topology.ts | 54 ++- src/timeout.ts | 14 + src/utils.ts | 10 + ...ient_side_operations_timeout.prose.test.ts | 315 +++++++++++++----- ...lient_side_operations_timeout.unit.test.ts | 140 +++++--- .../node_csot.test.ts | 75 ++++- test/unit/cmap/connection_pool.test.js | 33 +- test/unit/index.test.ts | 1 + 20 files changed, 570 insertions(+), 178 deletions(-) diff --git a/src/admin.ts b/src/admin.ts index a71ac4be1dc..e030384eafc 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -78,7 +78,8 @@ export class Admin { new RunAdminCommandOperation(command, { ...resolveBSONOptions(options), session: options?.session, - readPreference: options?.readPreference + readPreference: options?.readPreference, + timeoutMS: options?.timeoutMS ?? this.s.db.timeoutMS }) ); } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 0837c54d3fa..507b95b0b98 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,6 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; +import { type Timeout } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -94,6 +95,9 @@ export interface CommandOptions extends BSONSerializeOptions { writeConcern?: WriteConcern; directConnection?: boolean; + + /** @internal */ + timeout?: Timeout; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5a858a5121e..79440db1e06 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -21,13 +21,14 @@ import { MongoInvalidArgumentError, MongoMissingCredentialsError, MongoNetworkError, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; +import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -102,7 +103,6 @@ export interface ConnectionPoolOptions extends Omit void; reject: (err: AnyError) => void; - timeout: Timeout; [kCancelled]?: boolean; checkoutTime: number; } @@ -355,37 +355,57 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(): Promise { - const checkoutTime = now(); + async checkOut(options?: { timeout?: Timeout }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; + const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; const { promise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(waitQueueTimeoutMS); + let timeout: Timeout | null = null; + if (options?.timeout) { + // CSOT enabled + // Determine if we're using the timeout passed in or a new timeout + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + // This check determines whether or not Topology.selectServer used the configured + // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + // server selection used `timeoutMS`, so we should use the existing timeout as the timeout + // here + timeout = options.timeout; + } else { + // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with + // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut + // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking + timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); + } + } + } else { + timeout = Timeout.expires(waitQueueTimeoutMS); + } const waitQueueMember: WaitQueueMember = { resolve, - reject, - timeout, - checkoutTime + reject }; this[kWaitQueue].push(waitQueueMember); process.nextTick(() => this.processWaitQueue()); try { - return await Promise.race([promise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { waitQueueMember[kCancelled] = true; - waitQueueMember.timeout.clear(); - this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime) @@ -396,9 +416,16 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); + if (options?.timeout) { + throw new MongoOperationTimeoutError('Timed out during connection checkout', { + cause: timeoutError + }); + } throw timeoutError; } throw error; + } finally { + if (timeout !== options?.timeout) timeout?.clear(); } } @@ -764,7 +791,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.reject(error); continue; @@ -785,7 +811,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECKED_OUT, new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.resolve(connection); @@ -828,8 +853,6 @@ export class ConnectionPool extends TypedEventEmitter { ); waitQueueMember.resolve(connection); } - - waitQueueMember.timeout.clear(); } process.nextTick(() => this.processWaitQueue()); }); diff --git a/src/collection.ts b/src/collection.ts index ccc6fe2da65..dbd91371cce 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -262,6 +262,11 @@ export class Collection { this.s.collectionHint = normalizeHintField(v); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options.timeoutMS; + } + /** * Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field, * one will be added to each of the documents missing it by the driver, mutating the document. This behavior diff --git a/src/db.ts b/src/db.ts index 53c18e44af6..6e1aa194acf 100644 --- a/src/db.ts +++ b/src/db.ts @@ -222,6 +222,11 @@ export class Db { return this.s.namespace.toString(); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options?.timeoutMS; + } + /** * Create a new collection on a server with the specified options. Use this to create capped collections. 
* More information about command options available at https://www.mongodb.com/docs/manual/reference/command/create/ @@ -272,6 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/error.ts b/src/error.ts index a9178389486..f0441426feb 100644 --- a/src/error.ts +++ b/src/error.ts @@ -857,6 +857,15 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } } +/** + * @internal + */ +export class MongoOperationTimeoutError extends MongoRuntimeError { + override get name(): string { + return 'MongoOperationTimeoutError'; + } +} + /** * An error thrown when the user attempts to add options to a cursor that has already been * initialized diff --git a/src/index.ts b/src/index.ts index 9538ce1d5cc..13df3e8c437 100644 --- a/src/index.ts +++ b/src/index.ts @@ -66,6 +66,7 @@ export { MongoNetworkTimeoutError, MongoNotConnectedError, MongoOIDCError, + MongoOperationTimeoutError, MongoParseError, MongoRuntimeError, MongoServerClosedError, diff --git a/src/operations/command.ts b/src/operations/command.ts index 94ccc6ceafe..c64b4ae963a 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -65,6 +65,7 @@ export interface OperationParent { writeConcern?: WriteConcern; readPreference?: ReadPreference; bsonOptions?: BSONSerializeOptions; + timeoutMS?: number; } /** @internal */ @@ -131,6 +132,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, + timeout: this.timeout, readPreference: this.readPreference, session }; diff --git a/src/operations/find.ts b/src/operations/find.ts index a040af73bc6..0f81f2d61f2 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -116,7 +116,8 @@ export class FindOperation extends CommandOperation { ...this.options, ...this.bsonOptions, documentsReturnedIn: 'firstBatch', - session + session, + timeout: this.timeout }, this.explain ? ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 12f168b76e3..e08d25bfec0 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,6 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type Timeout } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -57,6 +58,11 @@ export abstract class AbstractOperation { options: OperationOptions; + /** @internal */ + timeout?: Timeout; + /** @internal */ + timeoutMS?: number; + [kSession]: ClientSession | undefined; static aspects?: Set; @@ -74,6 +80,8 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; + + this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index ad7d02c044f..56462fa8843 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -14,6 +14,8 @@ export type RunCommandOptions = { session?: ClientSession; /** The read preference */ readPreference?: ReadPreferenceLike; + /** @internal */ + timeoutMS?: number; } & BSONSerializeOptions; /** @internal */ @@ -39,10 +41,12 @@ export class RunCommandOperation extends AbstractOperation { { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }, this.options.responseType ); + return res; } } @@ -68,7 +72,8 @@ export class RunAdminCommandOperation extends AbstractOperation const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }); return res; } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 4c1d37519ad..3d2a3ca1a31 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,7 +311,7 @@ export class Server extends TypedEventEmitter { this.incrementOperationCount(); if (conn == null) { try { - conn = await this.pool.checkOut(); + conn = await this.pool.checkOut(options); if (this.loadBalanced && isPinnableCommand(cmd, session)) { session?.pin(conn); } @@ -336,6 +336,7 @@ export class Server extends TypedEventEmitter { operationError.code === MONGODB_ERROR_CODES.Reauthenticate ) { await this.pool.reauthenticate(conn); + // TODO(NODE-5682): Implement CSOT support for socket read/write at the connection layer try { const res = await conn.command(ns, cmd, finalOptions, responseType); throwIfWriteConcernError(res); diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 73b0e92a09a..4c9d71d807d 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -24,6 +24,7 @@ import { type MongoDriverError, MongoError, MongoErrorLabel, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerSelectionError, MongoTopologyClosedError @@ -37,6 +38,7 @@ import { Timeout, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, + csotMin, type EventEmitterWithState, HostAddress, List, @@ -107,7 +109,6 @@ export interface ServerSelectionRequest { resolve: (server: Server) => void; reject: (error: MongoError) => void; [kCancelled]?: boolean; - timeout: Timeout; operationName: string; waitingLogged: boolean; previousServer?: ServerDescription; @@ -457,8 +458,14 @@ export class Topology extends TypedEventEmitter { } } + const timeoutMS = this.client.options.timeoutMS; + const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const selectServerOptions = { operationName: 'ping', ...options }; + const selectServerOptions = { + operationName: 'ping', + timeout, + ...options + }; try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), @@ -467,7 +474,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, {}); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -556,6 +563,25 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } + const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 0; + let timeout: Timeout | null; + if (options.timeout) { + // CSOT Enabled + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + timeout = options.timeout; + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } + } else { + timeout = null; + } + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } const isSharded = this.description.type === TopologyType.Sharded; const session = options.session; @@ -578,11 +604,12 @@ export class Topology extends TypedEventEmitter { ) ); } + if (timeout !== options.timeout) timeout?.clear(); return transaction.server; } const { promise: serverPromise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); + const waitQueueMember: ServerSelectionRequest = { serverSelector, topologyDescription: this.description, @@ -590,7 +617,6 @@ export class Topology extends TypedEventEmitter { transaction, resolve, reject, - timeout, startTime: now(), operationName: options.operationName, waitingLogged: false, @@ -601,14 +627,14 @@ export class Topology extends TypedEventEmitter { processWaitQueue(this); try { - return await Promise.race([serverPromise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([serverPromise, timeout]) : serverPromise); } catch (error) { if (TimeoutError.is(error)) { // Timeout waitQueueMember[kCancelled] = true; - timeout.clear(); const timeoutError = new MongoServerSelectionError( - `Server selection timed out after ${options.serverSelectionTimeoutMS} ms`, + `Server selection timed out after ${timeout?.duration} ms`, this.description ); if ( @@ -628,10 +654,17 @@ export class Topology extends TypedEventEmitter { ); } + if (options.timeout) { + throw new MongoOperationTimeoutError('Timed out during server selection', { + cause: timeoutError + }); + } throw timeoutError; } // Other server selection error throw error; + } finally { + if (timeout !== options.timeout) timeout?.clear(); } } /** @@ -889,8 +922,6 @@ function drainWaitQueue(queue: List, drainError: MongoDr continue; } - waitQueueMember.timeout.clear(); - if (!waitQueueMember[kCancelled]) { if ( waitQueueMember.mongoLogger?.willLog( @@ -944,7 +975,6 @@ function processWaitQueue(topology: Topology) { ) : serverDescriptions; } catch (selectorError) { - waitQueueMember.timeout.clear(); if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, @@ -1032,8 +1062,6 @@ function processWaitQueue(topology: Topology) { transaction.pinServer(selectedServer); } - waitQueueMember.timeout.clear(); - if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, diff --git a/src/timeout.ts b/src/timeout.ts index cd48ec385da..7af1a23f261 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -40,6 +40,16 @@ export class Timeout extends Promise { public duration: number; public timedOut = false; + get remainingTime(): number { + if (this.timedOut) return 0; + if (this.duration === 0) return Infinity; + return this.start + this.duration - Math.trunc(performance.now()); + } + + get timeElapsed(): number { + return Math.trunc(performance.now()) - this.start; + } + /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = false) { let reject!: Reject; @@ -78,6 +88,10 @@ export class Timeout extends Promise { this.id = undefined; } + throwIfExpired(): void { + if (this.timedOut) throw new TimeoutError('Timed out'); + } + public static expires(durationMS: number, unref?: boolean): Timeout { return new Timeout(undefined, durationMS, unref); } diff --git a/src/utils.ts b/src/utils.ts index 5ad754c9321..ebc0784cb1f 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -544,6 +544,10 @@ export function resolveOptions( result.readPreference = readPreference; } + const timeoutMS = options?.timeoutMS; + + result.timeoutMS = timeoutMS ?? 
parent?.timeoutMS; + return result; } @@ -1379,6 +1383,12 @@ export async function fileIsAccessible(fileName: string, mode?: number) { } } +export function csotMin(duration1: number, duration2: number): number { + if (duration1 === 0) return duration2; + if (duration2 === 0) return duration1; + return Math.min(duration1, duration2); +} + export function noop() { return; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1ed88f34d86..903ea9c3bb4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,8 +1,30 @@ /* Specification prose tests */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { + MongoClient, + MongoOperationTimeoutError, + MongoServerSelectionError, + now +} from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec prose tests', () => { - context('1. Multi-batch writes', () => { +describe('CSOT spec prose tests', function () { + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + }); + + afterEach(async function () { + await internalClient?.close(); + await client?.close(); + }); + + context.skip('1. Multi-batch writes', () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -31,7 +53,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('2. maxTimeMS is not set for commands sent to mongocryptd', () => { + context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { /** * This test MUST only be run against enterprise server versions 4.2 and higher. * @@ -42,7 +64,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('3. ClientEncryption', () => { + context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, * `LOCAL_MASTERKEY` refers to the following base64: @@ -132,7 +154,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('4. Background Connection Pooling', () => { + context.skip('4. Background Connection Pooling', () => { /** * The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication * fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait @@ -192,7 +214,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('5. Blocking Iteration Methods', () => { + context.skip('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -251,7 +273,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('6. GridFS - Upload', () => { + context.skip('6. GridFS - Upload', () => { /** Tests in this section MUST only be run against server versions 4.4 and higher. 
*/ context('uploads via openUploadStream can be timed out', () => { @@ -306,7 +328,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('7. GridFS - Download', () => { + context.skip('7. GridFS - Download', () => { /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -351,96 +373,225 @@ describe.skip('CSOT spec prose tests', () => { }); context('8. Server Selection', () => { - context('serverSelectionTimeoutMS honored if timeoutMS is not set', () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. - * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. - */ - }); + context('using sinon timer', function () { + let clock: sinon.SinonFakeTimers; + + beforeEach(function () { + clock = sinon.useFakeTimers(); + }); + + afterEach(function () { + clock.restore(); + }); - context( - "timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", - () => { + it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. + * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ - } - ); - context( - "serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", - () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. + /** NOTE: This is the original implementation of this test, but it was flaky, so was + * replaced by the current implementation using sinon fake timers + * ```ts + * client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + * const admin = client.db('test').admin(); + * const start = performance.now(); + * const maybeError = await admin.ping().then( + * () => null, + * e => e + * ); + * const end = performance.now(); + * + * expect(maybeError).to.be.instanceof(MongoServerSelectionError); + * expect(end - start).to.be.lte(15) + * ``` */ - } - ); + client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + const admin = client.db('test').admin(); + const maybeError = admin.ping().then( + () => null, + e => e + ); + + await clock.tickAsync(11); + expect(await maybeError).to.be.instanceof(MongoServerSelectionError); + }); + }); + + it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. 
+ */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); + + it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); - context('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', () => { + it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ + client = new MongoClient('mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); }); - context( - "timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. 
+ * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); - context( - "serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 20, + timeoutMS: 10 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + it.skip("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); + + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 10, + timeoutMS: 20 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context('9. endSession', () => { + context.skip('9. endSession', () => { /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -472,7 +623,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('10. 
Convenient Transactions', () => { + context.skip('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index cf9c5f736ff..c1426d8db1d 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -1,51 +1,105 @@ -/* eslint-disable @typescript-eslint/no-empty-function */ /** * The following tests are described in CSOTs spec prose tests as "unit" tests * The tests enumerated in this section could not be expressed in either spec or prose format. * Drivers SHOULD implement these if it is possible to do so using the driver's existing test infrastructure. */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec unit tests', () => { - context('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', () => {}); - - context( - 'If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', - () => {} - ); - - context( - 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', - () => {} - ); - - context( - 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - - context( - 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', - () => {} - ); +describe('CSOT spec unit tests', function () { + let client: MongoClient; + + afterEach(async function () { + sinon.restore(); + await client?.close(); + }); + + context('Server Selection and Connection Checkout', function () { + it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); + sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(Timeout.expires).to.have.been.calledWith(10000); + expect(Timeout.expires).to.not.have.been.calledWith(999999); + }); + + it('If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ timeoutMS: 
1000 }); + // Spy on connection checkout and pull options argument + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(checkoutSpy).to.have.been.calledOnce; + expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + // Check that we passed through the timeout + expect(checkoutSpy.firstCall.args[0].timeout).to.equal( + selectServerSpy.lastCall.lastArg.timeout + ); + + // Check that no more Timeouts are constructed after we enter checkout + expect(!expiresSpy.calledAfter(checkoutSpy)); + }); + + it('If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 123456 }); + + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + expect(checkoutSpy).to.have.been.calledAfter(selectServerSpy); + + expect(expiresSpy).to.have.been.calledWith(123456); + }); + + /* eslint-disable @typescript-eslint/no-empty-function */ + context.skip( + 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', + () => {} + ).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + context( + 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', + () => {} + ); + }); + + context.skip('Socket timeouts', function () { + context( + 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', + () => {} + ); + }).skipReason = + 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; + + context.skip('Client side encryption', function () { + context( + 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', + () => {} + ); + + context( + 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', + () => {} + ); + }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + + context.skip('Background Connection Pooling', function () { + context( + 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', + () => {} + ); + }).skipReason = 'TODO(NODE-6091): Implement CSOT logic for Background Connection Pooling'; + /* eslint-enable @typescript-eslint/no-empty-function */ }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b6a936afbb9..5636eb00db7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -7,7 +7,9 @@ import { type Collection, type Db, type FindCursor, - type MongoClient + LEGACY_HELLO_COMMAND, + type MongoClient, + MongoOperationTimeoutError } from '../../mongodb'; describe('CSOT driver tests', () => { @@ -94,4 +96,75 @@ 
describe('CSOT driver tests', () => { }); }); }); + + describe('autoconnect', () => { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + client = undefined; + }); + + describe('when failing autoconnect with timeoutMS defined', () => { + let configClient: MongoClient; + + beforeEach(async function () { + configClient = this.configuration.newClient(); + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + }); + + afterEach(async function () { + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + await configClient.close(); + }); + + it('throws a MongoOperationTimeoutError', { + metadata: { requires: { mongodb: '>=4.4' } }, + test: async function () { + const commandsStarted = []; + client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + + client.on('commandStarted', ev => commandsStarted.push(ev)); + + const maybeError = await client + .db('test') + .collection('test') + .insertOne({ a: 19 }) + .then( + () => null, + e => e + ); + + expect(maybeError).to.exist; + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + + expect(commandsStarted).to.have.length(0); // Ensure that we fail before we start the insertOne + } + }); + }); + }); }); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 69102e1f150..18048befab4 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -5,7 +5,7 @@ const { WaitQueueTimeoutError } = require('../../mongodb'); const mock = require('../../tools/mongodb-mock/index'); const sinon = require('sinon'); const { expect } = require('chai'); -const { setImmediate } = require('timers'); +const { setImmediate } = require('timers/promises'); const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); @@ -26,6 +26,9 @@ describe('Connection Pool', function () { options: { extendedMetadata: {} } + }, + s: { + serverSelectionTimeoutMS: 0 } } }; @@ -98,7 +101,7 @@ describe('Connection Pool', function () { pool.checkIn(conn); }); - it('should clear timed out wait queue members if no connections are available', function (done) { + it('should clear timed out wait queue members if no connections are available', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; if (isHello(doc)) { @@ -114,23 +117,15 @@ describe('Connection Pool', function () { pool.ready(); - pool.checkOut().then(conn => { - expect(conn).to.exist; - pool.checkOut().then(expect.fail, err => { - expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); - - // We can only process the wait queue with `checkIn` and `checkOut`, so we - // force the pool here to think there are no available connections, even though - // we are checking the connection back in. 
This simulates a slow leak where - // incoming requests outpace the ability of the queue to fully process cancelled - // wait queue members - sinon.stub(pool, 'availableConnectionCount').get(() => 0); - pool.checkIn(conn); - - setImmediate(() => expect(pool).property('waitQueueSize').to.equal(0)); - done(); - }); - }, expect.fail); + const conn = await pool.checkOut(); + const err = await pool.checkOut().catch(e => e); + expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); + sinon.stub(pool, 'availableConnectionCount').get(() => 0); + pool.checkIn(conn); + + await setImmediate(); + + expect(pool).property('waitQueueSize').to.equal(0); }); describe('minPoolSize population', function () { diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index 595f372c43d..a1e8f22e37d 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -110,6 +110,7 @@ const EXPECTED_EXPORTS = [ 'MongoTailableCursorError', 'MongoTopologyClosedError', 'MongoTransactionError', + 'MongoOperationTimeoutError', 'MongoUnexpectedServerResponseError', 'MongoWriteConcernError', 'WriteConcernErrorResult', From ecaedfa26de1fd42d14fccc1198ca51b0d009d4f Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 10 Jun 2024 10:46:02 -0400 Subject: [PATCH 072/136] test(NODE-6120): Implement Unified test runner changes for CSOT (#4121) --- test/spec/unified-test-format/Makefile | 37 +++++- .../collectionData-createOptions.yml | 7 +- .../valid-pass/createEntities-operation.json | 74 ++++++++++++ .../valid-pass/createEntities-operation.yml | 38 ++++++ .../valid-pass/entity-cursor-iterateOnce.json | 111 ++++++++++++++++++ .../valid-pass/entity-cursor-iterateOnce.yml | 59 ++++++++++ .../valid-pass/entity-find-cursor.json | 15 ++- .../valid-pass/entity-find-cursor.yml | 6 +- ...ectedEventsForClient-ignoreExtraEvents.yml | 2 +- .../valid-pass/matches-lte-operator.json | 78 ++++++++++++ .../valid-pass/matches-lte-operator.yml | 41 +++++++ .../valid-pass/poc-change-streams.json | 36 ++++++ .../valid-pass/poc-change-streams.yml | 18 +++ .../valid-pass/poc-crud.json | 2 +- .../valid-pass/poc-crud.yml | 2 +- .../valid-pass/poc-sessions.json | 2 +- .../valid-pass/poc-sessions.yml | 3 +- .../poc-transactions-convenient-api.json | 2 +- .../poc-transactions-convenient-api.yml | 2 +- .../poc-transactions-mongos-pin-auto.json | 2 +- .../poc-transactions-mongos-pin-auto.yml | 2 +- .../valid-pass/poc-transactions.json | 6 +- .../valid-pass/poc-transactions.yml | 6 +- test/tools/unified-spec-runner/match.ts | 32 ++++- test/tools/unified-spec-runner/schema.ts | 1 + 25 files changed, 547 insertions(+), 37 deletions(-) create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.json create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.yml create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.json create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.yml diff --git a/test/spec/unified-test-format/Makefile b/test/spec/unified-test-format/Makefile index 9711d9eee0e..a2b79e3f70b 100644 --- a/test/spec/unified-test-format/Makefile +++ b/test/spec/unified-test-format/Makefile @@ -1,8 +1,8 @@ -SCHEMA=../schema-1.5.json +SCHEMA=../schema-1.21.json -.PHONY: all invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud 
collection-management sessions command-monitoring HAS_AJV +.PHONY: all invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout HAS_AJV -all: invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud collection-management sessions command-monitoring +all: invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api change-streams crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout client-side-encryption invalid: HAS_AJV @# Redirect stdout to hide expected validation errors @@ -14,6 +14,9 @@ valid-fail: HAS_AJV valid-pass: HAS_AJV @ajv test -s $(SCHEMA) -d "valid-pass/*.yml" --valid +atlas-data-lake: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../atlas-data-lake-testing/tests/unified/*.yml" --valid + versioned-api: HAS_AJV @ajv test -s $(SCHEMA) -d "../../versioned-api/tests/*.yml" --valid @@ -26,17 +29,39 @@ gridfs: HAS_AJV transactions: HAS_AJV @ajv test -s $(SCHEMA) -d "../../transactions/tests/unified/*.yml" --valid +transactions-convenient-api: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../transactions-convenient-api/tests/unified/*.yml" --valid + +change-streams: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../change-streams/tests/unified/*.yml" --valid + +client-side-operations-timeout: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-operations-timeout/tests/*.yml" --valid + crud: HAS_AJV @ajv test -s $(SCHEMA) -d "../../crud/tests/unified/*.yml" --valid collection-management: HAS_AJV @ajv test -s $(SCHEMA) -d "../../collection-management/tests/*.yml" --valid +read-write-concern: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../read-write-concern/tests/operation/*.yml" --valid + +retryable-reads: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-reads/tests/unified/*.yml" --valid + +retryable-writes: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-writes/tests/unified/*.yml" --valid + sessions: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../sessions/tests/unified/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../sessions/tests/*.yml" --valid + +command-logging-and-monitoring: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/logging/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/monitoring/*.yml" --valid -command-monitoring: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../command-monitoring/tests/unified/*.yml" --valid +client-side-encryption: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-encryption/tests/unified/*.yml" --valid HAS_AJV: @if ! command -v ajv > /dev/null; then \ diff --git a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml index 3b1c0c3a412..c6afedcfa96 100644 --- a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml +++ b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml @@ -1,12 +1,9 @@ description: collectionData-createOptions - schemaVersion: "1.9" - runOnRequirements: - minServerVersion: "3.6" # Capped collections cannot be created on serverless instances. 
serverless: forbid - createEntities: - client: id: &client0 client0 @@ -18,7 +15,6 @@ createEntities: id: &collection0 collection0 database: *database0 collectionName: &collection0Name coll0 - initialData: - collectionName: *collection0Name databaseName: *database0Name @@ -28,7 +24,6 @@ initialData: size: &cappedSize 4096 documents: - { _id: 1, x: 11 } - tests: - description: collection is created with the correct options operations: @@ -39,4 +34,4 @@ tests: - $collStats: { storageStats: {} } - $project: { capped: '$storageStats.capped', maxSize: '$storageStats.maxSize'} expectResult: - - { capped: true, maxSize: *cappedSize } + - { capped: true, maxSize: *cappedSize } \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.json b/test/spec/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 00000000000..3fde42919d7 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.yml b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml new file mode 100644 index 00000000000..ee8acd73687 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml @@ -0,0 +1,38 @@ +description: createEntities-operation + +# Note: createEntities is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +tests: + - description: createEntities operation + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client1 client1 + observeEvents: [ commandStartedEvent ] + - database: + id: &database1 database1 + client: *client1 + databaseName: &database1Name database1 + - collection: + id: &collection1 collection1 + database: *database1 + collectionName: &collection1Name coll1 + - name: deleteOne + object: *collection1 + arguments: + filter: { _id : 1 } + expectEvents: + - client: *client1 + events: + - commandStartedEvent: + command: + delete: *collection1Name + deletes: + - q: { _id: 1 } + limit: 1 + commandName: delete + databaseName: *database1Name diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 00000000000..b17ae78b942 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,111 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml new file mode 100644 index 00000000000..508e594a538 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml @@ -0,0 +1,59 @@ +description: entity-cursor-iterateOnce + +# Note: iterateOnce is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - _id: 1 + - _id: 2 + - _id: 3 + +tests: + - description: iterateOnce + operations: + - name: createFindCursor + object: *collection0 + arguments: + filter: {} + batchSize: 2 + saveResultAsEntity: &cursor0 cursor0 + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 1 } + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 2 } + # This operation could be iterateUntilDocumentOrError, but we use iterateOne to ensure that drivers support it. + - name: iterateOnce + object: *cursor0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: {} + batchSize: 2 + commandName: find + databaseName: *database0Name + - commandStartedEvent: + command: + getMore: { $$type: [ int, long ] } + collection: *collection0Name + commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f3..6f955d81f4a 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json @@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml index 61c9f8835ac..3ecdf6da1df 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml @@ -61,19 +61,19 @@ tests: - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } firstBatch: { $$type: array } commandName: find - commandStartedEvent: command: - getMore: { $$type: long } + getMore: { $$type: [ int, long ] } collection: *collection0Name commandName: getMore - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } nextBatch: { $$type: array } commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml index 162d0e3c046..d6d87094f64 100644 --- a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml +++ b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml @@ -75,4 +75,4 @@ tests: insert: *collection0Name documents: - *insertDocument4 - commandName: insert + commandName: insert \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.json 
b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 00000000000..4de65c58387 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml new file mode 100644 index 00000000000..4bec571f029 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml @@ -0,0 +1,41 @@ +description: matches-lte-operator + +# Note: $$lte is not technically in the 1.8 schema but was introduced at the same time. +schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0Name + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +tests: + - description: special lte matching operator + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id : 1, y: 1 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection0Name + documents: + # We can make exact assertions here but we use the $$lte operator to ensure drivers support it. 
+ - { _id: { $$lte: 1 }, y: { $$lte: 2 } } + commandName: insert + databaseName: *database0Name diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.json b/test/spec/unified-test-format/valid-pass/poc-change-streams.json index 4194005eb41..50f0d06f08d 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.json @@ -94,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml index b066cf0b89a..a7daafceb77 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml @@ -59,6 +59,24 @@ initialData: documents: [] tests: + - description: "saveResultAsEntity is optional for createChangeStream" + runOnRequirements: + - minServerVersion: "3.8.0" + topologies: [ replicaset ] + operations: + - name: createChangeStream + object: *client0 + arguments: + pipeline: [] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + aggregate: 1 + commandName: aggregate + databaseName: admin + - description: "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster." 
runOnRequirements: - minServerVersion: "3.8.0" diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.json b/test/spec/unified-test-format/valid-pass/poc-crud.json index 0790d9b789f..94e4ec56829 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.json +++ b/test/spec/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.yml b/test/spec/unified-test-format/valid-pass/poc-crud.yml index b7d05d75afb..5748c0779f8 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.yml +++ b/test/spec/unified-test-format/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: aggregate diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.json b/test/spec/unified-test-format/valid-pass/poc-sessions.json index 75f34894286..117c9e7d009 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.json +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.yml b/test/spec/unified-test-format/valid-pass/poc-sessions.yml index cb16657da3f..20902583286 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.yml @@ -124,12 +124,11 @@ tests: - description: "Dirty explicit session is discarded" # Original test specified retryWrites=true, but that is now the default. - # Retryable writes will require a sharded-replicaset, though. 
runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed659276..9ab44a9c548 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml index 4f981d15dd4..94fadda0aa5 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a5..de08edec442 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml index 47db7c3188a..33cd2a25214 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.json b/test/spec/unified-test-format/valid-pass/poc-transactions.json index 0355ca20605..2055a3b7057 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.yml b/test/spec/unified-test-format/valid-pass/poc-transactions.yml index 0a66b9bd7f6..8a12c8b39ac 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: @@ -51,7 +51,7 @@ tests: - description: "explicitly create 
collection using create command" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 @@ -109,7 +109,7 @@ tests: - description: "create index on a non-existing collection" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index bb4ba99a449..4d37fce9ac8 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -25,6 +25,7 @@ import { MongoBulkWriteError, MongoClientBulkWriteError, MongoError, + MongoOperationTimeoutError, MongoServerError, ObjectId, type OneOrMore, @@ -98,6 +99,19 @@ export function isMatchAsRootOperator(value: unknown): value is MatchAsRootOpera return typeof value === 'object' && value != null && '$$matchAsRoot' in value; } +export interface LteOperator { + $$lte: number; +} + +export function isLteOperator(value: unknown): value is LteOperator { + return ( + typeof value === 'object' && + value != null && + '$$lte' in value && + typeof value['$$lte'] === 'number' + ); +} + export const SpecialOperatorKeys = [ '$$exists', '$$type', @@ -106,7 +120,8 @@ export const SpecialOperatorKeys = [ '$$matchAsRoot', '$$matchAsDocument', '$$unsetOrMatches', - '$$sessionLsid' + '$$sessionLsid', + '$$lte' ]; export type SpecialOperator = @@ -117,7 +132,8 @@ export type SpecialOperator = | UnsetOrMatchesOperator | SessionLsidOperator | MatchAsDocumentOperator - | MatchAsRootOperator; + | MatchAsRootOperator + | LteOperator; type KeysOfUnion = T extends object ? 
keyof T : never; export type SpecialOperatorKey = KeysOfUnion; @@ -130,7 +146,8 @@ export function isSpecialOperator(value: unknown): value is SpecialOperator { isUnsetOrMatchesOperator(value) || isSessionLsidOperator(value) || isMatchAsRootOperator(value) || - isMatchAsDocumentOperator(value) + isMatchAsDocumentOperator(value) || + isLteOperator(value) ); } @@ -390,6 +407,9 @@ export function specialCheck( ); resultCheck(actual, expected.$$matchAsRoot as any, entities, path, false); + } else if (isLteOperator(expected)) { + expect(typeof actual).to.equal('number'); + expect(actual).to.be.lte(expected.$$lte); } else { expect.fail(`Unknown special operator: ${JSON.stringify(expected)}`); } @@ -759,6 +779,12 @@ export function expectErrorCheck( } } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/schema.ts b/test/tools/unified-spec-runner/schema.ts index 81b81724632..ce722b2e706 100644 --- a/test/tools/unified-spec-runner/schema.ts +++ b/test/tools/unified-spec-runner/schema.ts @@ -386,6 +386,7 @@ export interface StoreEventsAsEntity { } export interface ExpectedError { isError?: true; + isTimeoutError?: boolean; isClientError?: boolean; errorContains?: string; errorCode?: number; From e04694c3ba971afcf5381d207eb0575f581d4418 Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 21 Jun 2024 12:06:30 -0400 Subject: [PATCH 073/136] refactor(NODE-6187): refactor to use TimeoutContext abstraction (#4131) --- src/bulk/common.ts | 4 + src/cmap/connection.ts | 4 +- src/cmap/connection_pool.ts | 39 +--- src/index.ts | 18 +- src/operations/aggregate.ts | 5 +- src/operations/bulk_write.ts | 11 +- src/operations/command.ts | 8 +- src/operations/count.ts | 9 +- src/operations/create_collection.ts | 18 +- src/operations/delete.ts | 21 +- src/operations/distinct.ts | 9 +- src/operations/drop.ts | 24 ++- src/operations/estimated_document_count.ts | 9 +- src/operations/execute_operation.ts | 16 +- src/operations/find.ts | 6 +- src/operations/find_and_modify.ts | 9 +- src/operations/get_more.ts | 5 +- src/operations/indexes.ts | 22 +- src/operations/insert.ts | 19 +- src/operations/kill_cursors.ts | 12 +- src/operations/list_collections.ts | 5 +- src/operations/list_databases.ts | 11 +- src/operations/operation.ts | 10 +- src/operations/profiling_level.ts | 9 +- src/operations/remove_user.ts | 9 +- src/operations/rename.ts | 9 +- src/operations/run_command.ts | 17 +- src/operations/search_indexes/create.ts | 12 +- src/operations/search_indexes/drop.ts | 9 +- src/operations/search_indexes/update.ts | 9 +- src/operations/set_profiling_level.ts | 6 +- src/operations/stats.ts | 9 +- src/operations/update.ts | 24 ++- src/operations/validate_collection.ts | 9 +- src/sdam/server.ts | 12 +- src/sdam/topology.ts | 55 +++-- src/timeout.ts | 166 +++++++++++++- ...lient_side_operations_timeout.unit.test.ts | 12 +- .../node_csot.test.ts | 2 +- test/tools/cmap_spec_runner.ts | 12 +- test/unit/cmap/connection_pool.test.js | 22 +- test/unit/error.test.ts | 19 +- test/unit/operations/get_more.test.ts | 2 +- test/unit/sdam/topology.test.ts | 76 +++++-- test/unit/timeout.test.ts | 204 +++++++++++++++++- 45 files changed, 796 insertions(+), 202 deletions(-) 
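The commit above replaces the ad-hoc `timeout` option previously threaded into `ConnectionPool.checkOut` with a `TimeoutContext` passed from `executeOperation` down through `Server.command` and each operation's `execute`. Below is a minimal, standalone sketch of the shape the pool consumes (`csotEnabled()`, `connectionCheckoutTimeout`, `clearConnectionCheckoutTimeout`); the `Sketch*` names, the `Deadline` type, and the clearing policy shown are illustrative assumptions, not the driver's actual `CSOTTimeoutContext`/`LegacyTimeoutContext` implementations in `src/timeout.ts`.

```ts
// Minimal sketch of the TimeoutContext shape consumed by ConnectionPool.checkOut.
// The Deadline type and the Sketch* classes are assumptions for illustration only.

type Deadline = { remainingMS: number } | null;

abstract class SketchTimeoutContext {
  /** True when the operation runs under a CSOT timeoutMS budget. */
  abstract csotEnabled(): boolean;
  /** The deadline (if any) that should bound connection checkout. */
  abstract get connectionCheckoutTimeout(): Deadline;
  /** Whether checkout owns this deadline and should clear it when done. */
  abstract get clearConnectionCheckoutTimeout(): boolean;
}

/** CSOT path: checkout draws on the operation's remaining timeoutMS budget. */
class SketchCSOTContext extends SketchTimeoutContext {
  constructor(
    private readonly timeoutMS: number,
    private readonly start = Date.now()
  ) {
    super();
  }
  csotEnabled(): boolean {
    return true;
  }
  get connectionCheckoutTimeout(): Deadline {
    if (this.timeoutMS === 0) return null; // timeoutMS=0 means "no deadline"
    return { remainingMS: Math.max(this.timeoutMS - (Date.now() - this.start), 0) };
  }
  get clearConnectionCheckoutTimeout(): boolean {
    // In this sketch the deadline is shared with the rest of the operation,
    // so checkout leaves it running.
    return false;
  }
}

/** Legacy path: checkout is bounded by waitQueueTimeoutMS alone. */
class SketchLegacyContext extends SketchTimeoutContext {
  constructor(private readonly waitQueueTimeoutMS: number) {
    super();
  }
  csotEnabled(): boolean {
    return false;
  }
  get connectionCheckoutTimeout(): Deadline {
    return this.waitQueueTimeoutMS > 0 ? { remainingMS: this.waitQueueTimeoutMS } : null;
  }
  get clearConnectionCheckoutTimeout(): boolean {
    // This deadline exists only for checkout, so checkout clears it.
    return true;
  }
}

// A checkOut-like consumer no longer chooses between timeoutMS,
// serverSelectionTimeoutMS and waitQueueTimeoutMS itself; it asks the context.
function describeCheckout(ctx: SketchTimeoutContext): string {
  const deadline = ctx.connectionCheckoutTimeout;
  const budget = deadline == null ? 'unbounded' : `${deadline.remainingMS}ms`;
  return `${ctx.csotEnabled() ? 'CSOT' : 'legacy'} checkout, budget ${budget}`;
}

console.log(describeCheckout(new SketchCSOTContext(1000))); // "CSOT checkout, budget ~1000ms"
console.log(describeCheckout(new SketchLegacyContext(10000))); // "legacy checkout, budget 10000ms"
```

With the deadline decision owned by the context, the pool's own logic shrinks to `const timeout = options.timeoutContext.connectionCheckoutTimeout;`, as the connection_pool.ts hunk below shows.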
diff --git a/src/bulk/common.ts b/src/bulk/common.ts index a62d62a4a5c..dc0bcfb513f 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -19,6 +19,7 @@ import { makeUpdateStatement, UpdateOperation, type UpdateStatement } from '../o import type { Server } from '../sdam/server'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { applyRetryableWrites, getTopology, @@ -842,6 +843,9 @@ export interface BulkWriteOptions extends CommandOperationOptions { forceServerObjectId?: boolean; /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */ let?: Document; + + /** @internal */ + timeoutContext?: TimeoutContext; } /** diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 507b95b0b98..f7bb1789b7c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,7 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type TimeoutContext } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -97,7 +97,7 @@ export interface CommandOptions extends BSONSerializeOptions { directConnection?: boolean; /** @internal */ - timeout?: Timeout; + timeoutContext?: TimeoutContext; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 79440db1e06..5369cc155aa 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -27,8 +27,8 @@ import { } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; -import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type TimeoutContext, TimeoutError } from '../timeout'; +import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -355,41 +355,15 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(options?: { timeout?: Timeout }): Promise { + async checkOut(options: { timeoutContext: TimeoutContext }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); - const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; - const { promise, resolve, reject } = promiseWithResolvers(); - let timeout: Timeout | null = null; - if (options?.timeout) { - // CSOT enabled - // Determine if we're using the timeout passed in or a new timeout - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - // This check determines whether or not Topology.selectServer used the configured - // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - // server selection used `timeoutMS`, so we should use the existing timeout as the timeout - // here - timeout = options.timeout; - } else { - // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with - // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut - // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking - timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); - } - } - } else { - timeout = Timeout.expires(waitQueueTimeoutMS); - } + const timeout = options.timeoutContext.connectionCheckoutTimeout; const waitQueueMember: WaitQueueMember = { resolve, @@ -404,6 +378,7 @@ export class ConnectionPool extends TypedEventEmitter { return await (timeout ? Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { + timeout?.clear(); waitQueueMember[kCancelled] = true; this.emitAndLog( @@ -416,7 +391,7 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); - if (options?.timeout) { + if (options.timeoutContext.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during connection checkout', { cause: timeoutError }); @@ -425,7 +400,7 @@ export class ConnectionPool extends TypedEventEmitter { } throw error; } finally { - if (timeout !== options?.timeout) timeout?.clear(); + if (options.timeoutContext.clearConnectionCheckoutTimeout) timeout?.clear(); } } diff --git a/src/index.ts b/src/index.ts index 13df3e8c437..693fcf03493 100644 --- a/src/index.ts +++ b/src/index.ts @@ -566,7 +566,13 @@ export type { RTTSampler, ServerMonitoringMode } from './sdam/monitor'; -export type { Server, ServerEvents, ServerOptions, ServerPrivate } from './sdam/server'; +export type { + Server, + ServerCommandOptions, + ServerEvents, + ServerOptions, + ServerPrivate +} from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, @@ -597,7 +603,15 @@ export type { WithTransactionCallback } from './sessions'; export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort'; -export type { Timeout } from './timeout'; +export type { + CSOTTimeoutContext, + CSOTTimeoutContextOptions, + LegacyTimeoutContext, + LegacyTimeoutContextOptions, + Timeout, + TimeoutContext, + TimeoutContextOptions +} from './timeout'; export type { Transaction, TransactionOptions, TxnState } from './transactions'; export type { BufferPool, diff --git a/src/operations/aggregate.ts 
b/src/operations/aggregate.ts index a5a267ac3e4..50494cbba73 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -3,6 +3,7 @@ import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/r import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -97,7 +98,8 @@ export class AggregateOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options: AggregateOptions = this.options; const serverWireVersion = maxWireVersion(server); @@ -142,6 +144,7 @@ export class AggregateOperation extends CommandOperation { server, session, command, + timeoutContext, this.explain ? ExplainedCursorResponse : CursorResponse ); } diff --git a/src/operations/bulk_write.ts b/src/operations/bulk_write.ts index 0a855644f06..55b61ef73b0 100644 --- a/src/operations/bulk_write.ts +++ b/src/operations/bulk_write.ts @@ -7,6 +7,7 @@ import type { import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { AbstractOperation, Aspect, defineAspects } from './operation'; /** @internal */ @@ -32,11 +33,17 @@ export class BulkWriteOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const operations = this.operations; - const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; + const options = { + ...this.options, + ...this.bsonOptions, + readPreference: this.readPreference, + timeoutContext + }; // Create the bulk operation const bulk: BulkOperationBase = diff --git a/src/operations/command.ts b/src/operations/command.ts index c64b4ae963a..5bd80f796d1 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -7,6 +7,7 @@ import type { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { commandSupportsReadConcern, decorateWithExplain, @@ -112,19 +113,22 @@ export abstract class CommandOperation extends AbstractOperation { server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType: T | undefined ): Promise>; public async executeCommand( server: Server, session: ClientSession | undefined, - cmd: Document + cmd: Document, + timeoutContext: TimeoutContext ): Promise; async executeCommand( server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType?: MongoDBResponseConstructor ): Promise { this.server = server; @@ -132,7 +136,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, - timeout: this.timeout, + timeoutContext, 
readPreference: this.readPreference, session }; diff --git a/src/operations/count.ts b/src/operations/count.ts index 00aae501728..82330a11e76 100644 --- a/src/operations/count.ts +++ b/src/operations/count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -36,7 +37,11 @@ export class CountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const cmd: Document = { count: this.collectionName, @@ -59,7 +64,7 @@ export class CountOperation extends CommandOperation { cmd.maxTimeMS = options.maxTimeMS; } - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return result ? result.n : 0; } } diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index 8edc7e9a1c4..afb2680b9a0 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -9,6 +9,7 @@ import { MongoCompatibilityError } from '../error'; import type { PkFactory } from '../mongo_client'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { CreateIndexesOperation } from './indexes'; import { Aspect, defineAspects } from './operation'; @@ -124,7 +125,11 @@ export class CreateCollectionOperation extends CommandOperation { return 'create' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const name = this.name; const options = this.options; @@ -155,7 +160,7 @@ export class CreateCollectionOperation extends CommandOperation { unique: true } }); - await createOp.executeWithoutEncryptedFieldsCheck(server, session); + await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } if (!options.encryptedFields) { @@ -163,7 +168,7 @@ export class CreateCollectionOperation extends CommandOperation { } } - const coll = await this.executeWithoutEncryptedFieldsCheck(server, session); + const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); if (encryptedFields) { // Create the required index for queryable encryption support. 
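The hunks above and below all make the same mechanical change: execute gains a
timeoutContext: TimeoutContext parameter, which is forwarded to executeCommand or
server.command so the per-operation deadline reaches the connection layer. A minimal
sketch of the resulting shape, using a hypothetical PingOperation that is not part of
this patch:

import type { Document } from '../bson';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { CommandOperation } from './command';

/** Hypothetical operation used only to illustrate the new execute signature. */
export class PingOperation extends CommandOperation<Document> {
  override get commandName() {
    return 'ping' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Document> {
    // executeCommand now receives the context and places it on the command options,
    // so Server.command and ConnectionPool.checkOut derive their deadlines from it.
    return await super.executeCommand(server, session, { ping: 1 }, timeoutContext);
  }
}

executeOperation builds the TimeoutContext (from timeoutMS, serverSelectionTimeoutMS and
waitQueueTimeoutMS) and tryOperation passes it into execute, so individual operations never
construct their own context.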
@@ -173,7 +178,7 @@ export class CreateCollectionOperation extends CommandOperation { { __safeContent__: 1 }, {} ); - await createIndexOp.execute(server, session); + await createIndexOp.execute(server, session, timeoutContext); } return coll; @@ -181,7 +186,8 @@ export class CreateCollectionOperation extends CommandOperation { private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const db = this.db; const name = this.name; @@ -198,7 +204,7 @@ export class CreateCollectionOperation extends CommandOperation { } } // otherwise just execute the command - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); return new Collection(db, name, options); } } diff --git a/src/operations/delete.ts b/src/operations/delete.ts index f0ef61cb7b1..0e93ead36a2 100644 --- a/src/operations/delete.ts +++ b/src/operations/delete.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoServerError } from '../error'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace } from '../utils'; import { type WriteConcernOptions } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -67,7 +68,8 @@ export class DeleteOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; @@ -95,7 +97,12 @@ export class DeleteOperation extends CommandOperation { } } - const res: TODO_NODE_3286 = await super.executeCommand(server, session, command); + const res: TODO_NODE_3286 = await super.executeCommand( + server, + session, + command, + timeoutContext + ); return res; } } @@ -107,9 +114,10 @@ export class DeleteOneOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -127,9 +135,10 @@ export class DeleteManyOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/distinct.ts b/src/operations/distinct.ts index 4fda285d880..51f2a362d8c 100644 --- a/src/operations/distinct.ts +++ b/src/operations/distinct.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, decorateWithReadConcern } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -42,7 +43,11 @@ export class DistinctOperation extends CommandOperation { return 'distinct' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const key = this.key; const query = this.query; @@ -72,7 +77,7 @@ export class DistinctOperation extends CommandOperation { // Have we specified collation decorateWithCollation(cmd, coll, options); - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return this.explain ? 
result : result.values; } diff --git a/src/operations/drop.ts b/src/operations/drop.ts index 15624d4c07b..787bb6e7d0f 100644 --- a/src/operations/drop.ts +++ b/src/operations/drop.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { MONGODB_ERROR_CODES, MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class DropCollectionOperation extends CommandOperation { return 'drop' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const options = this.options; const name = this.name; @@ -57,7 +62,7 @@ export class DropCollectionOperation extends CommandOperation { // Drop auxilliary collections, ignoring potential NamespaceNotFound errors. const dropOp = new DropCollectionOperation(db, collectionName); try { - await dropOp.executeWithoutEncryptedFieldsCheck(server, session); + await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } catch (err) { if ( !(err instanceof MongoServerError) || @@ -69,14 +74,15 @@ export class DropCollectionOperation extends CommandOperation { } } - return await this.executeWithoutEncryptedFieldsCheck(server, session); + return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - await super.executeCommand(server, session, { drop: this.name }); + await super.executeCommand(server, session, { drop: this.name }, timeoutContext); return true; } } @@ -96,8 +102,12 @@ export class DropDatabaseOperation extends CommandOperation { return 'dropDatabase' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropDatabase: 1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext); return true; } } diff --git a/src/operations/estimated_document_count.ts b/src/operations/estimated_document_count.ts index c1d6c381998..5ab5aa4c305 100644 --- a/src/operations/estimated_document_count.ts +++ b/src/operations/estimated_document_count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -30,7 +31,11 @@ export class EstimatedDocumentCountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd: Document = { count: this.collectionName }; if (typeof 
this.options.maxTimeMS === 'number') { @@ -43,7 +48,7 @@ export class EstimatedDocumentCountOperation extends CommandOperation { cmd.comment = this.options.comment; } - const response = await super.executeCommand(server, session, cmd); + const response = await super.executeCommand(server, session, cmd, timeoutContext); return response?.n || 0; } diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index ec7c233eeca..0cffa0c35f7 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -24,7 +24,8 @@ import { } from '../sdam/server_selection'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; -import { supportsRetryableWrites } from '../utils'; +import { TimeoutContext } from '../timeout'; +import { squashError, supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -57,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -86,6 +87,12 @@ export async function executeOperation< ); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -109,7 +116,8 @@ export async function executeOperation< return await tryOperation(operation, { topology, session, - readPreference + readPreference, + timeoutContext }); } finally { if (session?.owner != null && session.owner === owner) { @@ -268,7 +276,7 @@ async function tryOperation< if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) { operation.resetBatch(); } - return await operation.execute(server, session); + return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; diff --git a/src/operations/find.ts b/src/operations/find.ts index 0f81f2d61f2..5f359324d56 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -5,6 +5,7 @@ import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithExplain, type MongoDBNamespace, normalizeHintField } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -98,7 +99,8 @@ export class FindOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { this.server = server; @@ -117,7 +119,7 @@ export class FindOperation extends CommandOperation { ...this.bsonOptions, documentsReturnedIn: 'firstBatch', session, - timeout: this.timeout + timeoutContext }, this.explain ? ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/find_and_modify.ts b/src/operations/find_and_modify.ts index 92b17e93b3b..651bcccb626 100644 --- a/src/operations/find_and_modify.ts +++ b/src/operations/find_and_modify.ts @@ -5,6 +5,7 @@ import { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortForCmd } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils'; import { type WriteConcern, type WriteConcernSettings } from '../write_concern'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -180,7 +181,11 @@ export class FindAndModifyOperation extends CommandOperation { return 'findAndModify' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const query = this.query; const options = { ...this.options, ...this.bsonOptions }; @@ -208,7 +213,7 @@ export class FindAndModifyOperation extends CommandOperation { } // Execute the command - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return options.includeResultMetadata ? result : (result.value ?? 
null); } } diff --git a/src/operations/get_more.ts b/src/operations/get_more.ts index aa550721b6f..34317d533b5 100644 --- a/src/operations/get_more.ts +++ b/src/operations/get_more.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -58,7 +59,8 @@ export class GetMoreOperation extends AbstractOperation { */ override async execute( server: Server, - _session: ClientSession | undefined + _session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Getmore must run on the same server operation began on'); @@ -97,6 +99,7 @@ export class GetMoreOperation extends AbstractOperation { const commandOptions = { returnFieldSelector: null, documentsReturnedIn: 'nextBatch', + timeoutContext, ...this.options }; diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index fda3fa80dd6..c96a5d73453 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -6,6 +6,7 @@ import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils'; import { type CollationOptions, @@ -296,7 +297,11 @@ export class CreateIndexesOperation extends CommandOperation { return 'createIndexes'; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const indexes = this.indexes; @@ -316,7 +321,7 @@ export class CreateIndexesOperation extends CommandOperation { // collation is set on each index, it should not be defined at the root this.options.collation = undefined; - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); const indexNames = indexes.map(index => index.name || ''); return indexNames; @@ -344,9 +349,13 @@ export class DropIndexOperation extends CommandOperation { return 'dropIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName }; - return await super.executeCommand(server, session, cmd); + return await super.executeCommand(server, session, cmd, timeoutContext); } } @@ -379,7 +388,8 @@ export class ListIndexesOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const serverWireVersion = maxWireVersion(server); @@ -393,7 +403,7 @@ export class ListIndexesOperation extends CommandOperation { command.comment = this.options.comment; } - return await super.executeCommand(server, session, command, CursorResponse); + return 
await super.executeCommand(server, session, command, timeoutContext, CursorResponse); } } diff --git a/src/operations/insert.ts b/src/operations/insert.ts index 35a050ed1ca..1a40763e313 100644 --- a/src/operations/insert.ts +++ b/src/operations/insert.ts @@ -5,6 +5,7 @@ import { MongoInvalidArgumentError, MongoServerError } from '../error'; import type { InferIdType } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { BulkWriteOperation } from './bulk_write'; @@ -27,7 +28,11 @@ export class InsertOperation extends CommandOperation { return 'insert' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? options.ordered : true; const command: Document = { @@ -46,7 +51,7 @@ export class InsertOperation extends CommandOperation { command.comment = options.comment; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } @@ -73,9 +78,10 @@ export class InsertOneOperation extends InsertOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res = await super.execute(server, session); + const res = await super.execute(server, session, timeoutContext); if (res.code) throw new MongoServerError(res); if (res.writeErrors) { // This should be a WriteError but we can't change it now because of error hierarchy @@ -123,7 +129,8 @@ export class InsertManyOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; @@ -137,7 +144,7 @@ export class InsertManyOperation extends AbstractOperation { ); try { - const res = await bulkWriteOperation.execute(server, session); + const res = await bulkWriteOperation.execute(server, session, timeoutContext); return { acknowledged: writeConcern?.w !== 0, insertedCount: res.insertedCount, diff --git a/src/operations/kill_cursors.ts b/src/operations/kill_cursors.ts index 356230e9c7a..72c6a04b276 100644 --- a/src/operations/kill_cursors.ts +++ b/src/operations/kill_cursors.ts @@ -2,6 +2,7 @@ import type { Long } from '../bson'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -29,7 +30,11 @@ export class KillCursorsOperation extends AbstractOperation { return 'killCursors' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise 
{ if (server !== this.server) { throw new MongoRuntimeError('Killcursor must run on the same server operation began on'); } @@ -46,7 +51,10 @@ export class KillCursorsOperation extends AbstractOperation { cursors: [this.cursorId] }; try { - await server.command(this.ns, killCursorsCommand, { session }); + await server.command(this.ns, killCursorsCommand, { + session, + timeoutContext + }); } catch (error) { // The driver should never emit errors from killCursors, this is spec-ed behavior squashError(error); diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index e94300f1205..702db0fe3f2 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -54,12 +55,14 @@ export class ListCollectionsOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { return await super.executeCommand( server, session, this.generateCommand(maxWireVersion(server)), + timeoutContext, CursorResponse ); } diff --git a/src/operations/list_databases.ts b/src/operations/list_databases.ts index 5ad9142a1a7..bd740d50c68 100644 --- a/src/operations/list_databases.ts +++ b/src/operations/list_databases.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -41,7 +42,8 @@ export class ListDatabasesOperation extends CommandOperation { const cmd: Document = { listDatabases: 1 }; @@ -63,7 +65,12 @@ export class ListDatabasesOperation extends CommandOperation); + return await (super.executeCommand( + server, + session, + cmd, + timeoutContext + ) as Promise); } } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index e08d25bfec0..8558af7a4e5 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,7 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type Timeout, type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -80,15 +80,17 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; - - this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
Command name should be stateless (should not use 'this' keyword) */ abstract get commandName(): string; - abstract execute(server: Server, session: ClientSession | undefined): Promise; + abstract execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise; hasAspect(aspect: symbol): boolean { const ctor = this.constructor as { aspects?: Set }; diff --git a/src/operations/profiling_level.ts b/src/operations/profiling_level.ts index 383062c2a40..7c860a244b7 100644 --- a/src/operations/profiling_level.ts +++ b/src/operations/profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -20,8 +21,12 @@ export class ProfilingLevelOperation extends CommandOperation { return 'profile' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - const doc = await super.executeCommand(server, session, { profile: -1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + const doc = await super.executeCommand(server, session, { profile: -1 }, timeoutContext); if (doc.ok === 1) { const was = doc.was; if (was === 0) return 'off'; diff --git a/src/operations/remove_user.ts b/src/operations/remove_user.ts index ced8e4e1cab..7f484ba89a3 100644 --- a/src/operations/remove_user.ts +++ b/src/operations/remove_user.ts @@ -1,6 +1,7 @@ import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -22,8 +23,12 @@ export class RemoveUserOperation extends CommandOperation { return 'dropUser' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropUser: this.username }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropUser: this.username }, timeoutContext); return true; } } diff --git a/src/operations/rename.ts b/src/operations/rename.ts index a27d4afe45a..883be282b64 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class RenameOperation extends CommandOperation { return 'renameCollection' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { // Build the command const renameCollection = this.collection.namespace; const toCollection = 
this.collection.s.namespace.withCollection(this.newName).toString(); @@ -42,7 +47,7 @@ export class RenameOperation extends CommandOperation { dropTarget: dropTarget }; - await super.executeCommand(server, session, command); + await super.executeCommand(server, session, command, timeoutContext); return new Collection(this.collection.s.db, this.newName, this.collection.s.options); } } diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index 56462fa8843..b91e2d0344e 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -5,6 +5,7 @@ import { type TODO_NODE_3286 } from '../mongo_types'; import type { ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { AbstractOperation } from './operation'; @@ -33,7 +34,11 @@ export class RunCommandOperation extends AbstractOperation { return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command( this.ns, @@ -42,7 +47,7 @@ export class RunCommandOperation extends AbstractOperation { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }, this.options.responseType ); @@ -67,13 +72,17 @@ export class RunAdminCommandOperation extends AbstractOperation return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }); return res; } diff --git a/src/operations/search_indexes/create.ts b/src/operations/search_indexes/create.ts index 7e5e55d18d6..9661026e3eb 100644 --- a/src/operations/search_indexes/create.ts +++ b/src/operations/search_indexes/create.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @@ -32,14 +33,21 @@ export class CreateSearchIndexesOperation extends AbstractOperation { return 'createSearchIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { createSearchIndexes: namespace.collection, indexes: this.descriptions }; - const res = await server.command(namespace, command, { session }); + const res = await server.command(namespace, command, { + session, + timeoutContext + }); const indexesCreated: Array<{ name: string }> = res?.indexesCreated ?? 
[]; return indexesCreated.map(({ name }) => name); diff --git a/src/operations/search_indexes/drop.ts b/src/operations/search_indexes/drop.ts index 4e287cca012..e9ea0ad01ce 100644 --- a/src/operations/search_indexes/drop.ts +++ b/src/operations/search_indexes/drop.ts @@ -4,6 +4,7 @@ import type { Collection } from '../../collection'; import { MONGODB_ERROR_CODES, MongoServerError } from '../../error'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class DropSearchIndexOperation extends AbstractOperation { return 'dropSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command: Document = { @@ -31,7 +36,7 @@ export class DropSearchIndexOperation extends AbstractOperation { } try { - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); } catch (error) { const isNamespaceNotFoundError = error instanceof MongoServerError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound; diff --git a/src/operations/search_indexes/update.ts b/src/operations/search_indexes/update.ts index aad7f93536c..e88e777d675 100644 --- a/src/operations/search_indexes/update.ts +++ b/src/operations/search_indexes/update.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class UpdateSearchIndexOperation extends AbstractOperation { return 'updateSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { updateSearchIndex: namespace.collection, @@ -27,7 +32,7 @@ export class UpdateSearchIndexOperation extends AbstractOperation { definition: this.definition }; - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); return; } } diff --git a/src/operations/set_profiling_level.ts b/src/operations/set_profiling_level.ts index 9969b2ea3c2..d76473f2632 100644 --- a/src/operations/set_profiling_level.ts +++ b/src/operations/set_profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { enumToString } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -53,7 +54,8 @@ export class SetProfilingLevelOperation extends CommandOperation override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const level = this.level; @@ -64,7 +66,7 @@ export 
class SetProfilingLevelOperation extends CommandOperation } // TODO(NODE-3483): Determine error to put here - await super.executeCommand(server, session, { profile: this.profile }); + await super.executeCommand(server, session, { profile: this.profile }, timeoutContext); return level; } } diff --git a/src/operations/stats.ts b/src/operations/stats.ts index 41c9faf6e24..aafd3bf1bac 100644 --- a/src/operations/stats.ts +++ b/src/operations/stats.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -24,13 +25,17 @@ export class DbStatsOperation extends CommandOperation { return 'dbStats' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const command: Document = { dbStats: true }; if (this.options.scale != null) { command.scale = this.options.scale; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } diff --git a/src/operations/update.ts b/src/operations/update.ts index ba0ad6d95ff..5b6f396afec 100644 --- a/src/operations/update.ts +++ b/src/operations/update.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoInvalidArgumentError, MongoServerError } import type { InferIdType, TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { hasAtomicOperators, type MongoDBNamespace } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -91,7 +92,11 @@ export class UpdateOperation extends CommandOperation { return this.statements.every(op => op.multi == null || op.multi === false); } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -122,7 +127,7 @@ export class UpdateOperation extends CommandOperation { } } - const res = await super.executeCommand(server, session, command); + const res = await super.executeCommand(server, session, command, timeoutContext); return res; } } @@ -143,9 +148,10 @@ export class UpdateOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -177,9 +183,10 @@ export class UpdateManyOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -230,9 +237,10 @@ export class ReplaceOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/validate_collection.ts b/src/operations/validate_collection.ts index 4880a703a7a..16ae4cad9e0 100644 --- a/src/operations/validate_collection.ts +++ b/src/operations/validate_collection.ts @@ -3,6 +3,7 @@ import type { Document } from '../bson'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -37,10 +38,14 @@ export class ValidateCollectionOperation extends CommandOperation { return 'validate' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const collectionName = this.collectionName; - const doc = await super.executeCommand(server, session, this.command); + const doc = await super.executeCommand(server, session, this.command, timeoutContext); if (doc.result != null && typeof doc.result !== 'string') throw new MongoUnexpectedServerResponseError('Error with validation data'); if (doc.result != null && doc.result.match(/exception|corrupt/) != null) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 3d2a3ca1a31..08325086d53 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -40,6 +40,7 @@ import type { ServerApi } from '../mongo_client'; import { TypedEventEmitter } from '../mongo_types'; import type { GetMoreOptions 
} from '../operations/get_more'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isTransactionCommand } from '../transactions'; import { type EventEmitterWithState, @@ -104,6 +105,11 @@ export type ServerEvents = { } & ConnectionPoolEvents & EventEmitterWithState; +/** @internal */ +export type ServerCommandOptions = Omit & { + timeoutContext: TimeoutContext; +}; + /** @internal */ export class Server extends TypedEventEmitter { /** @internal */ @@ -267,20 +273,20 @@ export class Server extends TypedEventEmitter { public async command( ns: MongoDBNamespace, command: Document, - options: CommandOptions | undefined, + options: ServerCommandOptions, responseType: T | undefined ): Promise>; public async command( ns: MongoDBNamespace, command: Document, - options?: CommandOptions + options: ServerCommandOptions ): Promise; public async command( ns: MongoDBNamespace, cmd: Document, - options: CommandOptions, + options: ServerCommandOptions, responseType?: MongoDBResponseConstructor ): Promise { if (ns.db == null || typeof ns === 'string') { diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 4c9d71d807d..6117b5317cd 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -34,11 +34,10 @@ import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mong import { TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; -import { Timeout, TimeoutError } from '../timeout'; +import { Timeout, TimeoutContext, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, - csotMin, type EventEmitterWithState, HostAddress, List, @@ -179,8 +178,11 @@ export interface SelectServerOptions { session?: ClientSession; operationName: string; previousServer?: ServerDescription; - /** @internal*/ - timeout?: Timeout; + /** + * @internal + * TODO(NODE-5685): Make this required + * */ + timeoutContext?: TimeoutContext; } /** @public */ @@ -458,13 +460,20 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.options.timeoutMS; - const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; + const timeoutMS = this.client.s.options.timeoutMS; + const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? ReadPreference.primary; + + const timeoutContext = TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS, + waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS + }); + const selectServerOptions = { operationName: 'ping', - timeout, - ...options + ...options, + timeoutContext }; try { const server = await this.selectServer( @@ -474,7 +483,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -563,24 +572,10 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } - const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 
0; - let timeout: Timeout | null; - if (options.timeout) { - // CSOT Enabled - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - timeout = options.timeout; - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); - } - } else { - timeout = null; - } - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); + let timeout; + if (options.timeoutContext) timeout = options.timeoutContext.serverSelectionTimeout; + else { + timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); } const isSharded = this.description.type === TopologyType.Sharded; @@ -604,7 +599,7 @@ export class Topology extends TypedEventEmitter { ) ); } - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); return transaction.server; } @@ -654,7 +649,7 @@ export class Topology extends TypedEventEmitter { ); } - if (options.timeout) { + if (options.timeoutContext?.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during server selection', { cause: timeoutError }); @@ -664,7 +659,7 @@ export class Topology extends TypedEventEmitter { // Other server selection error throw error; } finally { - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); } } /** diff --git a/src/timeout.ts b/src/timeout.ts index 7af1a23f261..3d65992a02b 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,7 +1,7 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError } from './error'; -import { noop } from './utils'; +import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { @@ -107,3 +107,165 @@ export class Timeout extends Promise { ); } } + +/** @internal */ +export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; + +/** @internal */ +export type LegacyTimeoutContextOptions = { + serverSelectionTimeoutMS: number; + waitQueueTimeoutMS: number; + socketTimeoutMS?: number; +}; + +/** @internal */ +export type CSOTTimeoutContextOptions = { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; +}; + +function isLegacyTimeoutContextOptions(v: unknown): v is LegacyTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'waitQueueTimeoutMS' in v && + typeof v.waitQueueTimeoutMS === 'number' + ); +} + +function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'timeoutMS' in v && + typeof v.timeoutMS === 'number' + ); +} + +/** @internal */ +export abstract class TimeoutContext { + static create(options: TimeoutContextOptions): TimeoutContext { + if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); + else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); + else throw new MongoRuntimeError('Unrecognized options'); + } + + abstract get serverSelectionTimeout(): Timeout | null; + + abstract get connectionCheckoutTimeout(): Timeout | null; + + 
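+  /**
+   * When true, the Timeout (if any) returned by serverSelectionTimeout belongs to a single
+   * selectServer call and should be cleared once selection finishes; CSOT contexts return
+   * false here because the same Timeout is shared with connection checkout for the rest of
+   * the operation.
+   */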
abstract get clearServerSelectionTimeout(): boolean; + + abstract get clearConnectionCheckoutTimeout(): boolean; + + abstract csotEnabled(): this is CSOTTimeoutContext; +} + +/** @internal */ +export class CSOTTimeoutContext extends TimeoutContext { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; + + clearConnectionCheckoutTimeout: boolean; + clearServerSelectionTimeout: boolean; + + private _maxTimeMS?: number; + + private _serverSelectionTimeout?: Timeout | null; + private _connectionCheckoutTimeout?: Timeout | null; + + constructor(options: CSOTTimeoutContextOptions) { + super(); + this.timeoutMS = options.timeoutMS; + + this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; + + this.socketTimeoutMS = options.socketTimeoutMS; + + this.clearServerSelectionTimeout = false; + this.clearConnectionCheckoutTimeout = true; + } + + get maxTimeMS(): number { + return this._maxTimeMS ?? -1; + } + + set maxTimeMS(v: number) { + this._maxTimeMS = v; + } + + csotEnabled(): this is CSOTTimeoutContext { + return true; + } + + get serverSelectionTimeout(): Timeout | null { + // check for undefined + if (typeof this._serverSelectionTimeout !== 'object') { + const usingServerSelectionTimeoutMS = + this.serverSelectionTimeoutMS !== 0 && + csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; + + if (usingServerSelectionTimeoutMS) { + this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + } else { + if (this.timeoutMS > 0) { + this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + } else { + this._serverSelectionTimeout = null; + } + } + } + + return this._serverSelectionTimeout; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (typeof this._connectionCheckoutTimeout !== 'object') { + if (typeof this._serverSelectionTimeout === 'object') { + // null or Timeout + this._connectionCheckoutTimeout = this._serverSelectionTimeout; + } else { + throw new MongoRuntimeError( + 'Unreachable. 
If you are seeing this error, please file a ticket on the NODE driver project on Jira' + ); + } + } + return this._connectionCheckoutTimeout; + } +} + +/** @internal */ +export class LegacyTimeoutContext extends TimeoutContext { + options: LegacyTimeoutContextOptions; + clearServerSelectionTimeout: boolean; + clearConnectionCheckoutTimeout: boolean; + + constructor(options: LegacyTimeoutContextOptions) { + super(); + this.options = options; + this.clearServerSelectionTimeout = true; + this.clearConnectionCheckoutTimeout = true; + } + + csotEnabled(): this is CSOTTimeoutContext { + return false; + } + + get serverSelectionTimeout(): Timeout | null { + if (this.options.serverSelectionTimeoutMS != null && this.options.serverSelectionTimeoutMS > 0) + return Timeout.expires(this.options.serverSelectionTimeoutMS); + return null; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (this.options.waitQueueTimeoutMS != null && this.options.waitQueueTimeoutMS > 0) + return Timeout.expires(this.options.waitQueueTimeoutMS); + return null; + } +} diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index c1426d8db1d..c4989f58d7f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -33,16 +33,20 @@ describe('CSOT spec unit tests', function () { client = this.configuration.newClient({ timeoutMS: 1000 }); // Spy on connection checkout and pull options argument const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); - const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); const expiresSpy = sinon.spy(Timeout, 'expires'); await client.db('db').collection('collection').insertOne({ x: 1 }); expect(checkoutSpy).to.have.been.calledOnce; - expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + const timeoutContext = checkoutSpy.lastCall.args[0].timeoutContext; + expect(timeoutContext).to.exist; // Check that we passed through the timeout - expect(checkoutSpy.firstCall.args[0].timeout).to.equal( - selectServerSpy.lastCall.lastArg.timeout + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.be.instanceOf(Timeout); + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.equal( + // @ts-expect-error accessing private properties + timeoutContext._connectionCheckoutTimeout ); // Check that no more Timeouts are constructed after we enter checkout diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 5636eb00db7..17d85ba5b23 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -143,7 +143,7 @@ describe('CSOT driver tests', () => { }); it('throws a MongoOperationTimeoutError', { - metadata: { requires: { mongodb: '>=4.4' } }, + metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index f6d7e68bedc..9bb2abdb87a 100644 --- a/test/tools/cmap_spec_runner.ts +++ 
b/test/tools/cmap_spec_runner.ts @@ -12,7 +12,8 @@ import { makeClientMetadata, type MongoClient, type Server, - shuffle + shuffle, + TimeoutContext } from '../mongodb'; import { isAnyRequirementSatisfied } from './unified-spec-runner/unified-utils'; import { type FailPoint, sleep } from './utils'; @@ -185,7 +186,14 @@ const compareInputToSpec = (input, expected, message) => { const getTestOpDefinitions = (threadContext: ThreadContext) => ({ checkOut: async function (op) { - const connection: Connection = await ConnectionPool.prototype.checkOut.call(threadContext.pool); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: threadContext.pool.options.waitQueueTimeoutMS + }); + const connection: Connection = await ConnectionPool.prototype.checkOut.call( + threadContext.pool, + { timeoutContext } + ); if (op.label != null) { threadContext.connections.set(op.label, connection); } else { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 18048befab4..1604cd82d86 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -10,8 +10,10 @@ const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); const { MongoClientAuthProviders } = require('../../mongodb'); +const { TimeoutContext } = require('../../mongodb'); describe('Connection Pool', function () { + let timeoutContext; let mockMongod; const stubServer = { topology: { @@ -44,6 +46,10 @@ describe('Connection Pool', function () { }) ); + beforeEach(() => { + timeoutContext = TimeoutContext.create({ waitQueueTimeoutMS: 0, serverSelectionTimeoutMS: 0 }); + }); + it('should destroy connections which have been closed', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; @@ -64,8 +70,10 @@ describe('Connection Pool', function () { const events = []; pool.on('connectionClosed', event => events.push(event)); - const conn = await pool.checkOut(); - const error = await conn.command(ns('admin.$cmd'), { ping: 1 }, {}).catch(error => error); + const conn = await pool.checkOut({ timeoutContext }); + const error = await conn + .command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }) + .catch(error => error); expect(error).to.be.instanceOf(Error); pool.checkIn(conn); @@ -93,7 +101,7 @@ describe('Connection Pool', function () { pool.ready(); - const conn = await pool.checkOut(); + const conn = await pool.checkOut({ timeoutContext }); const maybeError = await conn.command(ns('admin.$cmd'), { ping: 1 }, undefined).catch(e => e); expect(maybeError).to.be.instanceOf(MongoError); expect(maybeError).to.match(/timed out/); @@ -114,11 +122,15 @@ describe('Connection Pool', function () { waitQueueTimeoutMS: 200, hostAddress: mockMongod.hostAddress() }); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 200, + serverSelectionTimeoutMS: 0 + }); pool.ready(); - const conn = await pool.checkOut(); - const err = await pool.checkOut().catch(e => e); + const conn = await pool.checkOut({ timeoutContext }); + const err = await pool.checkOut({ timeoutContext }).catch(e => e); expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); sinon.stub(pool, 'availableConnectionCount').get(() => 0); pool.checkIn(conn); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 6bab40d0318..bdc049cbc4f 100644 --- a/test/unit/error.test.ts +++ 
b/test/unit/error.test.ts @@ -28,6 +28,7 @@ import { ns, PoolClosedError as MongoPoolClosedError, setDifference, + TimeoutContext, type TopologyDescription, type TopologyOptions, WaitQueueTimeoutError as MongoWaitQueueTimeoutError @@ -376,11 +377,17 @@ describe('MongoErrors', () => { { replicaSet: 'rs' } as TopologyOptions ); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); return replSet .connect() - .then(topology => topology.selectServer('primary', {})) + .then(topology => topology.selectServer('primary', { timeoutContext })) .then(server => - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { + timeoutContext + }) ) .then( () => expect.fail('expected command to fail'), @@ -419,10 +426,14 @@ describe('MongoErrors', () => { if (err) { return cleanup(err); } + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - topology.selectServer('primary', {}).then(server => { + topology.selectServer('primary', { timeoutContext }).then(server => { server - .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { timeoutContext }) .then(expect.fail, err => { let _err; try { diff --git a/test/unit/operations/get_more.test.ts b/test/unit/operations/get_more.test.ts index f79da44e22f..17bc20f6fa7 100644 --- a/test/unit/operations/get_more.test.ts +++ b/test/unit/operations/get_more.test.ts @@ -69,7 +69,7 @@ describe('GetMoreOperation', function () { const call = stub.getCall(0); expect(call.args[0]).to.equal(namespace); expect(call.args[1]).to.deep.equal(expectedGetMoreCommand); - expect(call.args[2]).to.deep.equal(opts); + expect(call.args[2]).to.containSubset(opts); }); }); diff --git a/test/unit/sdam/topology.test.ts b/test/unit/sdam/topology.test.ts index e4a34417d50..5264b5d9c45 100644 --- a/test/unit/sdam/topology.test.ts +++ b/test/unit/sdam/topology.test.ts @@ -17,6 +17,7 @@ import { Server, SrvPoller, SrvPollingEvent, + TimeoutContext, Topology, TopologyDescription, TopologyDescriptionChangedEvent, @@ -108,17 +109,28 @@ describe('Topology (unit)', function () { const topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); topology.connect().then(() => { - topology.selectServer('primary', {}).then(server => { - server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }).then( - () => expect.fail('expected command to fail'), - err => { - expect(err).to.exist; - expect(err).to.match(/timed out/); - topology.close(); - done(); - } - ); - }, expect.fail); + const ctx = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0, + socketTimeoutMS: 250 + }); + topology + .selectServer('primary', { + timeoutContext: ctx + }) + .then(server => { + server + .command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250, timeoutContext: ctx }) + .then( + () => expect.fail('expected command to fail'), + err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + topology.close(); + done(); + } + ); + }, expect.fail); }, expect.fail); }); }); @@ -217,10 +229,16 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const timeoutContext = 
TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.true; }); @@ -245,11 +263,17 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.false; topology.close(); @@ -269,14 +293,20 @@ describe('Topology (unit)', function () { topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); await topology.connect(); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); const server = await topology.selectServer('primary', {}); let serverDescription; server.on('descriptionReceived', sd => (serverDescription = sd)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(server.description.type).to.equal('Unknown'); }); diff --git a/test/unit/timeout.test.ts b/test/unit/timeout.test.ts index 3fafc21b35f..119d0516a9c 100644 --- a/test/unit/timeout.test.ts +++ b/test/unit/timeout.test.ts @@ -1,6 +1,14 @@ import { expect } from 'chai'; -import { MongoInvalidArgumentError, Timeout, TimeoutError } from '../mongodb'; +import { + CSOTTimeoutContext, + LegacyTimeoutContext, + MongoInvalidArgumentError, + MongoRuntimeError, + Timeout, + TimeoutContext, + TimeoutError +} from '../mongodb'; describe('Timeout', function () { let timeout: Timeout; @@ -115,3 +123,197 @@ describe('Timeout', function () { }); }); }); + +describe('TimeoutContext', function () { + describe('TimeoutContext.create', function () { + context('when timeoutMS is a number', function () { + it('returns a CSOTTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(CSOTTimeoutContext); + }); + }); + + context('when timeoutMS is undefined', function () { + it('returns a LegacyTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(LegacyTimeoutContext); + }); + }); + }); + + describe('CSOTTimeoutContext', function () { + let ctx: CSOTTimeoutContext; + + describe('get serverSelectionTimeout()', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is 0', function () { + it('returns null', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); + + expect(ctx.serverSelectionTimeout).to.be.null; + }); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is >0', function () { + it('returns a Timeout instance 
with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + }); + + context( + 'when timeoutMS is >0 serverSelectionTimeoutMS is >0 and timeoutMS > serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 15, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + } + ); + + context( + 'when timeoutMS is >0, serverSelectionTimeoutMS is >0 and timeoutMS < serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to timeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 10, + serverSelectionTimeoutMS: 15 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.timeoutMS); + }); + } + ); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when called before get serverSelectionTimeout()', function () { + it('throws a MongoRuntimeError', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 15 + }); + + expect(() => ctx.connectionCheckoutTimeout).to.throw(MongoRuntimeError); + }); + }); + + context('when called after get serverSelectionTimeout()', function () { + let serverSelectionTimeout: Timeout; + let connectionCheckoutTimeout: Timeout; + + afterEach(() => { + serverSelectionTimeout.clear(); + connectionCheckoutTimeout.clear(); + }); + + it('returns same timeout as serverSelectionTimeout', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 86 + }); + serverSelectionTimeout = ctx.serverSelectionTimeout; + connectionCheckoutTimeout = ctx.connectionCheckoutTimeout; + + expect(connectionCheckoutTimeout).to.exist; + expect(connectionCheckoutTimeout).to.equal(serverSelectionTimeout); + }); + }); + }); + }); + + describe('LegacyTimeoutContext', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + describe('get serverSelectionTimeout()', function () { + context('when serverSelectionTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 100, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.serverSelectionTimeoutMS); + }); + }); + + context('when serverSelectionTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.null; + }); + }); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when waitQueueTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to waitQueueTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + 
waitQueueTimeoutMS: 20 + }); + timeout = ctx.connectionCheckoutTimeout; + + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.waitQueueTimeoutMS); + }); + }); + + context('when waitQueueTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + waitQueueTimeoutMS: 0 + }); + + expect(ctx.connectionCheckoutTimeout).to.be.null; + }); + }); + }); + }); +}); From 0a23f20778804e726a12e8b2e975e1a2ca0c97fb Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 22 Jul 2024 11:17:22 -0400 Subject: [PATCH 074/136] refactor(NODE-6230): executeOperation to use iterative retry mechanism (#4157) --- src/cmap/connection_pool.ts | 6 ++++-- src/operations/execute_operation.ts | 27 ++++++++++++++++----------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5369cc155aa..2cd2bcc2c19 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -28,7 +28,7 @@ import { import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { type TimeoutContext, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -356,6 +356,7 @@ export class ConnectionPool extends TypedEventEmitter { * explicitly destroyed by the new owner. */ async checkOut(options: { timeoutContext: TimeoutContext }): Promise { + const checkoutTime = now(); this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) @@ -367,7 +368,8 @@ export class ConnectionPool extends TypedEventEmitter { const waitQueueMember: WaitQueueMember = { resolve, - reject + reject, + checkoutTime }; this[kWaitQueue].push(waitQueueMember); diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 0cffa0c35f7..15cad8c32a7 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -25,7 +25,7 @@ import { import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; import { TimeoutContext } from '../timeout'; -import { squashError, supportsRetryableWrites } from '../utils'; +import { supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -87,12 +87,6 @@ export async function executeOperation< ); } - timeoutContext ??= TimeoutContext.create({ - serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, - waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, - timeoutMS: operation.options.timeoutMS - }); - const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -112,12 +106,18 @@ export async function executeOperation< session.unpin(); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + try { return await tryOperation(operation, { topology, + timeoutContext, session, - readPreference, - timeoutContext + readPreference }); } finally { if (session?.owner != null && session.owner === owner) { @@ -156,6 +156,7 @@ type RetryOptions = { session: ClientSession | undefined; readPreference: ReadPreference; topology: Topology; + timeoutContext: TimeoutContext; }; /** @@ -179,7 +180,10 @@ type RetryOptions = { async function tryOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(operation: T, { topology, session, readPreference }: RetryOptions): Promise { +>( + operation: T, + { topology, timeoutContext, session, readPreference }: RetryOptions +): Promise { let selector: ReadPreference | ServerSelector; if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) { @@ -197,7 +201,8 @@ async function tryOperation< let server = await topology.selectServer(selector, { session, - operationName: operation.commandName + operationName: operation.commandName, + timeoutContext }); const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION); From f3e190f14cbfbf83415609f3e97e33be24d7704e Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 26 Jul 2024 09:55:20 -0400 Subject: [PATCH 075/136] feat(NODE-5682): set maxTimeMS on commands and preempt I/O (#4174) Co-authored-by: Warren James --- src/admin.ts | 5 +- src/cmap/connection.ts | 66 ++++++++++++++++--- src/cmap/wire_protocol/on_data.ts | 17 ++++- src/db.ts | 2 +- src/sdam/topology.ts | 17 +++-- src/timeout.ts | 43 ++++++++++-- ...ient_side_operations_timeout.prose.test.ts | 20 +++--- ...lient_side_operations_timeout.spec.test.ts | 33 +++++++++- .../node_csot.test.ts | 1 - test/integration/node-specific/db.test.js | 22 ++----- test/spec/{index.js => index.ts} | 19 ++---- test/tools/cmap_spec_runner.ts | 3 +- test/tools/unified-spec-runner/entities.ts | 4 +- test/tools/unified-spec-runner/match.ts | 15 ++++- test/tools/unified-spec-runner/operations.ts | 8 +-- test/unit/tools/unified_spec_runner.test.ts | 2 +- 16 files changed, 200 insertions(+), 77 deletions(-) rename test/spec/{index.js => index.ts} (67%) diff --git a/src/admin.ts b/src/admin.ts index e030384eafc..0f03023a95c 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -155,7 +155,10 @@ export class Admin { * @param options - Optional settings for the command */ async listDatabases(options?: ListDatabasesOptions): Promise { - return await executeOperation(this.s.db.client, new ListDatabasesOperation(this.s.db, options)); + return await executeOperation( + this.s.db.client, + new ListDatabasesOperation(this.s.db, { timeoutMS: this.s.db.timeoutMS, ...options }) + ); } /** diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index f7bb1789b7c..010bcb8c897 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -19,6 +19,7 @@ import { MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, MongoServerError, MongoUnexpectedServerResponseError @@ -30,7 +31,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from 
'../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type TimeoutContext } from '../timeout'; +import { type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -419,6 +420,11 @@ export class Connection extends TypedEventEmitter { ...options }; + if (options.timeoutContext?.csotEnabled()) { + const { maxTimeMS } = options.timeoutContext; + if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + } + const message = this.supportsOpMsg ? new OpMsgRequest(db, cmd, commandOptions) : new OpQueryRequest(db, cmd, commandOptions); @@ -433,7 +439,9 @@ export class Connection extends TypedEventEmitter { ): AsyncGenerator { this.throwIfAborted(); - if (typeof options.socketTimeoutMS === 'number') { + if (options.timeoutContext?.csotEnabled()) { + this.socket.setTimeout(0); + } else if (typeof options.socketTimeoutMS === 'number') { this.socket.setTimeout(options.socketTimeoutMS); } else if (this.socketTimeoutMS !== 0) { this.socket.setTimeout(this.socketTimeoutMS); @@ -442,7 +450,8 @@ export class Connection extends TypedEventEmitter { try { await this.writeCommand(message, { agreedCompressor: this.description.compressor ?? 'none', - zlibCompressionLevel: this.description.zlibCompressionLevel + zlibCompressionLevel: this.description.zlibCompressionLevel, + timeoutContext: options.timeoutContext }); if (options.noResponse || message.moreToCome) { @@ -452,7 +461,17 @@ export class Connection extends TypedEventEmitter { this.throwIfAborted(); - for await (const response of this.readMany()) { + if ( + options.timeoutContext?.csotEnabled() && + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + + for await (const response of this.readMany({ timeoutContext: options.timeoutContext })) { this.socket.setTimeout(0); const bson = response.parse(); @@ -629,7 +648,11 @@ export class Connection extends TypedEventEmitter { */ private async writeCommand( command: WriteProtocolMessageType, - options: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number } + options: { + agreedCompressor?: CompressorName; + zlibCompressionLevel?: number; + timeoutContext?: TimeoutContext; + } ): Promise { const finalCommand = options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command) @@ -641,8 +664,32 @@ export class Connection extends TypedEventEmitter { const buffer = Buffer.concat(await finalCommand.toBin()); + if (options.timeoutContext?.csotEnabled()) { + if ( + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + } + if (this.socket.write(buffer)) return; - return await once(this.socket, 'drain'); + + const drainEvent = once(this.socket, 'drain'); + const timeout = options?.timeoutContext?.timeoutForSocketWrite; + if (timeout) { + try { + return await Promise.race([drainEvent, timeout]); + } catch (error) { + if (TimeoutError.is(error)) { + throw new MongoOperationTimeoutError('Timed out at socket write'); + } + throw error; + } + } + return await drainEvent; } /** @@ -654,10 +701,13 @@ export class Connection extends 
TypedEventEmitter { * * Note that `for-await` loops call `return` automatically when the loop is exited. */ - private async *readMany(): AsyncGenerator { + private async *readMany(options: { + timeoutContext?: TimeoutContext; + }): AsyncGenerator { try { - this.dataEvents = onData(this.messageStream); + this.dataEvents = onData(this.messageStream, options); this.messageStream.resume(); + for await (const message of this.dataEvents) { const response = await decompressResponse(message); yield response; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index b99c950d96f..a32c6b1b484 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,5 +1,7 @@ import { type EventEmitter } from 'events'; +import { MongoOperationTimeoutError } from '../../error'; +import { type TimeoutContext, TimeoutError } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -18,7 +20,10 @@ type PendingPromises = Omit< * Returns an AsyncIterator that iterates each 'data' event emitted from emitter. * It will reject upon an error event. */ -export function onData(emitter: EventEmitter) { +export function onData( + emitter: EventEmitter, + { timeoutContext }: { timeoutContext?: TimeoutContext } +) { // Setup pending events and pending promise lists /** * When the caller has not yet called .next(), we store the @@ -86,6 +91,8 @@ export function onData(emitter: EventEmitter) { // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + // eslint-disable-next-line github/no-then + timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -97,8 +104,12 @@ export function onData(emitter: EventEmitter) { function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - if (promise != null) promise.reject(err); - else error = err; + const timeoutError = TimeoutError.is(err) + ? new MongoOperationTimeoutError('Timed out during socket read') + : undefined; + + if (promise != null) promise.reject(timeoutError ?? err); + else error = timeoutError ?? err; void closeHandler(); } diff --git a/src/db.ts b/src/db.ts index 6e1aa194acf..48501bc497e 100644 --- a/src/db.ts +++ b/src/db.ts @@ -277,7 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS, + timeoutMS: options?.timeoutMS ?? this.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 6117b5317cd..479003f0e35 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -460,29 +460,28 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.s.options.timeoutMS; + // TODO(NODE-6223): auto connect cannot use timeoutMS + // const timeoutMS = this.client.s.options.timeoutMS; const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const timeoutContext = TimeoutContext.create({ - timeoutMS, + timeoutMS: undefined, serverSelectionTimeoutMS, waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS }); - const selectServerOptions = { operationName: 'ping', ...options, timeoutContext }; + try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), selectServerOptions ); - const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; - if (!skipPingOnConnect && server && this.s.credentials) { + if (!skipPingOnConnect && this.s.credentials) { await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); @@ -623,7 +622,11 @@ export class Topology extends TypedEventEmitter { try { timeout?.throwIfExpired(); - return await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + const server = await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + if (options.timeoutContext?.csotEnabled() && server.description.minRoundTripTime !== 0) { + options.timeoutContext.minRoundTripTime = server.description.minRoundTripTime; + } + return server; } catch (error) { if (TimeoutError.is(error)) { // Timeout diff --git a/src/timeout.ts b/src/timeout.ts index 3d65992a02b..cc90b8c2e72 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,6 +1,6 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; import { csotMin, noop } from './utils'; /** @internal */ @@ -51,7 +51,7 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = false) { + private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; if (duration < 0) { @@ -163,6 +163,10 @@ export abstract class TimeoutContext { abstract get clearConnectionCheckoutTimeout(): boolean; + abstract get timeoutForSocketWrite(): Timeout | null; + + abstract get timeoutForSocketRead(): Timeout | null; + abstract csotEnabled(): this is CSOTTimeoutContext; } @@ -175,13 +179,15 @@ export class CSOTTimeoutContext extends TimeoutContext { clearConnectionCheckoutTimeout: boolean; clearServerSelectionTimeout: boolean; - private _maxTimeMS?: number; - private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; + public minRoundTripTime = 0; + private start: number; constructor(options: CSOTTimeoutContextOptions) { super(); + this.start = Math.trunc(performance.now()); + this.timeoutMS = options.timeoutMS; this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; @@ -193,11 +199,12 @@ export class CSOTTimeoutContext extends TimeoutContext { } get maxTimeMS(): number { - return this._maxTimeMS ?? -1; + return this.remainingTimeMS - this.minRoundTripTime; } - set maxTimeMS(v: number) { - this._maxTimeMS = v; + get remainingTimeMS() { + const timePassed = Math.trunc(performance.now()) - this.start; + return this.timeoutMS <= 0 ? 
Infinity : this.timeoutMS - timePassed; } csotEnabled(): this is CSOTTimeoutContext { @@ -238,6 +245,20 @@ export class CSOTTimeoutContext extends TimeoutContext { } return this._connectionCheckoutTimeout; } + + get timeoutForSocketWrite(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket write'); + } + + get timeoutForSocketRead(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket read'); + } } /** @internal */ @@ -268,4 +289,12 @@ export class LegacyTimeoutContext extends TimeoutContext { return Timeout.expires(this.options.waitQueueTimeoutMS); return null; } + + get timeoutForSocketWrite(): Timeout | null { + return null; + } + + get timeoutForSocketRead(): Timeout | null { + return null; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 903ea9c3bb4..729bed42199 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -384,7 +384,7 @@ describe('CSOT spec prose tests', function () { clock.restore(); }); - it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { + it.skip('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. @@ -416,10 +416,11 @@ describe('CSOT spec prose tests', function () { await clock.tickAsync(11); expect(await maybeError).to.be.instanceof(MongoServerSelectionError); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); - it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -440,9 +441,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. 
@@ -462,9 +464,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { + it.skip('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -484,7 +487,8 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 2e2cd0fa8e5..f73f162204f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -3,7 +3,34 @@ import { join } from 'path'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -// TODO(NODE-5823): Implement unified runner operations and options support for CSOT -describe.skip('CSOT spec tests', function () { - runUnifiedSuite(loadSpecTests(join('client-side-operations-timeout'))); +const enabled = [ + 'override-collection-timeoutMS', + 'override-database-timeoutMS', + 'override-operation-timeoutMS' +]; + +const cursorOperations = [ + 'aggregate', + 'countDocuments', + 'listIndexes', + 'createChangeStream', + 'listCollections', + 'listCollectionNames' +]; + +describe('CSOT spec tests', function () { + const specs = loadSpecTests(join('client-side-operations-timeout')); + for (const spec of specs) { + for (const test of spec.tests) { + // not one of the test suites listed in kickoff + if (!enabled.includes(spec.name)) { + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + + // Cursor operation + if (test.operations.find(operation => cursorOperations.includes(operation.name))) + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + } + runUnifiedSuite(specs); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 17d85ba5b23..0c97b910836 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -48,7 +48,6 @@ describe('CSOT driver tests', () => { afterEach(async () => { await cursor?.close(); await session?.endSession(); - await session.endSession(); }); it('throws an error', async () => { diff --git a/test/integration/node-specific/db.test.js b/test/integration/node-specific/db.test.js index 338e136c12c..a092a8d888b 100644 --- a/test/integration/node-specific/db.test.js +++ b/test/integration/node-specific/db.test.js @@ -45,22 +45,12 @@ 
describe('Db', function () { }); }); - it('shouldCorrectlyHandleFailedConnection', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var configuration = this.configuration; - var fs_client = configuration.newClient('mongodb://127.0.0.1:25117/test', { - serverSelectionTimeoutMS: 10 - }); - - fs_client.connect(function (err) { - test.ok(err != null); - done(); - }); - } + it('should correctly handle failed connection', async function () { + const client = this.configuration.newClient('mongodb://iLoveJS', { + serverSelectionTimeoutMS: 10 + }); + const error = await client.connect().catch(error => error); + expect(error).to.be.instanceOf(Error); }); it('shouldCorrectlyGetErrorDroppingNonExistingDb', { diff --git a/test/spec/index.js b/test/spec/index.ts similarity index 67% rename from test/spec/index.js rename to test/spec/index.ts index f9e6dccf02f..221d6671893 100644 --- a/test/spec/index.js +++ b/test/spec/index.ts @@ -1,7 +1,7 @@ -'use strict'; -const path = require('path'); -const fs = require('fs'); -const { EJSON } = require('bson'); +import * as fs from 'fs'; +import * as path from 'path'; + +import { EJSON } from '../mongodb'; function hasDuplicates(testArray) { const testNames = testArray.map(test => test.description); @@ -12,17 +12,16 @@ function hasDuplicates(testArray) { /** * Given spec test folder names, loads the corresponding JSON * - * @param {...string} args - the spec test name to load - * @returns {any[]} + * @param args - the spec test name to load */ -function loadSpecTests(...args) { +export function loadSpecTests(...args: string[]): any[] { const specPath = path.resolve(...[__dirname].concat(args)); const suites = fs .readdirSync(specPath) .filter(x => x.includes('.json')) .map(x => ({ - ...EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), + ...EJSON.parse(fs.readFileSync(path.join(specPath, x), 'utf8'), { relaxed: true }), name: path.basename(x, '.json') })); @@ -36,7 +35,3 @@ function loadSpecTests(...args) { return suites; } - -module.exports = { - loadSpecTests -}; diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index 9bb2abdb87a..892f6311df5 100644 --- a/test/tools/cmap_spec_runner.ts +++ b/test/tools/cmap_spec_runner.ts @@ -1,6 +1,7 @@ import { expect } from 'chai'; import { EventEmitter } from 'events'; import { clearTimeout, setTimeout } from 'timers'; +import { inspect } from 'util'; import { addContainerMetadata, @@ -427,7 +428,7 @@ async function runCmapTest(test: CmapTest, threadContext: ThreadContext) { } compareInputToSpec(actualError, errorPropsToCheck, `failed while checking ${errorType}`); } else { - expect(actualError).to.not.exist; + expect(actualError, inspect(actualError)).to.not.exist; } const actualEvents = threadContext.poolEvents.filter( diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 65b5242cf06..9f4e20a828e 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -44,7 +44,7 @@ import { type TopologyOpeningEvent, WriteConcern } from '../../mongodb'; -import { ejson, getEnvironmentalOptions } from '../../tools/utils'; +import { getEnvironmentalOptions } from '../../tools/utils'; import type { TestConfiguration } from '../runner/config'; import { EntityEventRegistry } from './entity_event_registry'; import { trace } from './runner'; @@ -590,7 +590,7 @@ export class EntitiesMap extends Map { new EntityEventRegistry(client, 
entity.client, map).register(); await client.connect(); } catch (error) { - console.error(ejson`failed to connect entity ${entity}`); + console.error('failed to connect entity', entity); // In the case where multiple clients are defined in the test and any one of them failed // to connect, but others did succeed, we need to ensure all open clients are closed. const clients = map.mapOf('client'); diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 4d37fce9ac8..5eb3af88759 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -174,7 +174,8 @@ TYPE_MAP.set('minKey', actual => actual._bsontype === 'MinKey'); TYPE_MAP.set('maxKey', actual => actual._bsontype === 'MaxKey'); TYPE_MAP.set( 'int', - actual => (typeof actual === 'number' && Number.isInteger(actual)) || actual._bsontype === 'Int32' + actual => + (typeof actual === 'number' && Number.isInteger(actual)) || actual?._bsontype === 'Int32' ); TYPE_MAP.set( 'long', @@ -219,6 +220,10 @@ export function resultCheck( resultCheck(objFromActual, value, entities, path, checkExtraKeys); } else if (key === 'createIndexes') { for (const [i, userIndex] of actual.indexes.entries()) { + if (expected?.indexes?.[i]?.key == null) { + // The expectation does not include an assertion for the index key + continue; + } expect(expected).to.have.nested.property(`.indexes[${i}].key`).to.be.a('object'); // @ts-expect-error: Not worth narrowing to a document expect(Object.keys(expected.indexes[i].key)).to.have.lengthOf(1); @@ -372,7 +377,7 @@ export function specialCheck( for (const type of types) { ok ||= TYPE_MAP.get(type)(actual); } - expect(ok, `Expected [${actual}] to be one of [${types}]`).to.be.true; + expect(ok, `Expected ${path.join('.')} [${actual}] to be one of [${types}]`).to.be.true; } else if (isExistsOperator(expected)) { // $$exists const actualExists = actual !== undefined && actual !== null; @@ -785,6 +790,12 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 9cc67174f3c..7a98c7ac978 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -303,6 +303,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { if (!/ns not found/.test(err.message)) { throw err; } + return false; } }); @@ -313,7 +314,7 @@ operations.set('drop', async ({ entities, operation }) => { operations.set('dropIndexes', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - return collection.dropIndexes(); + return collection.dropIndexes(operation.arguments); }); operations.set('endSession', async ({ entities, operation }) => { @@ -767,11 +768,10 @@ operations.set('runCommand', async ({ entities, operation }: OperationFunctionPa throw new AssertionError('runCommand requires a command'); const { command } = operation.arguments; - if (operation.arguments.timeoutMS != null) throw new AssertionError('timeoutMS not supported'); - 
const options = { readPreference: operation.arguments.readPreference, - session: operation.arguments.session + session: operation.arguments.session, + timeoutMS: operation.arguments.timeoutMS }; return db.command(command, options); diff --git a/test/unit/tools/unified_spec_runner.test.ts b/test/unit/tools/unified_spec_runner.test.ts index a0887be9593..7ebee168590 100644 --- a/test/unit/tools/unified_spec_runner.test.ts +++ b/test/unit/tools/unified_spec_runner.test.ts @@ -100,7 +100,7 @@ describe('Unified Spec Runner', function () { expect(() => resultCheckSpy(actual, expected, entitiesMap, [])).to.throw( AssertionError, - /Expected \[string\] to be one of \[int\]/ + /\[string\] to be one of \[int\]/ ); }); }); From 61b2d807e2765aec5199cb1a8ac2d4c3c981bd95 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 1 Aug 2024 16:08:39 -0400 Subject: [PATCH 076/136] feat(NODE-6231): Add CSOT behaviour for retryable reads and writes (#4186) --- src/operations/execute_operation.ts | 9 ++++--- src/timeout.ts | 26 ++++++++++++------- ...lient_side_operations_timeout.spec.test.ts | 13 +++++++++- ...lient_side_operations_timeout.unit.test.ts | 10 +++++-- .../node_csot.test.ts | 5 ---- test/tools/unified-spec-runner/match.ts | 2 ++ 6 files changed, 44 insertions(+), 21 deletions(-) diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 15cad8c32a7..cdddc1211a8 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -227,12 +227,10 @@ async function tryOperation< session.incrementTransactionNumber(); } - // TODO(NODE-6231): implement infinite retry within CSOT timeout here - const maxTries = willRetry ? 2 : 1; + const maxTries = willRetry ? (timeoutContext.csotEnabled() ? Infinity : 2) : 1; let previousOperationError: MongoError | undefined; let previousServer: ServerDescription | undefined; - // TODO(NODE-6231): implement infinite retry within CSOT timeout here for (let tries = 0; tries < maxTries; tries++) { if (previousOperationError) { if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) { @@ -284,7 +282,6 @@ async function tryOperation< return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; - if ( previousOperationError != null && operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed) @@ -293,6 +290,10 @@ async function tryOperation< } previousServer = server.description; previousOperationError = operationError; + + // Reset timeouts + timeoutContext.serverSelectionTimeout?.clear(); + timeoutContext.connectionCheckoutTimeout?.clear(); } } diff --git a/src/timeout.ts b/src/timeout.ts index cc90b8c2e72..297a484b4ec 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -39,6 +39,7 @@ export class Timeout extends Promise { public ended: number | null = null; public duration: number; public timedOut = false; + public cleared = false; get remainingTime(): number { if (this.timedOut) return 0; @@ -53,7 +54,6 @@ export class Timeout extends Promise { /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; - if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } @@ -86,6 +86,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.cleared = true; } throwIfExpired(): void { @@ 
-213,16 +214,20 @@ export class CSOTTimeoutContext extends TimeoutContext { get serverSelectionTimeout(): Timeout | null { // check for undefined - if (typeof this._serverSelectionTimeout !== 'object') { + if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { + const { remainingTimeMS, serverSelectionTimeoutMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError( + `Timed out in server selection after ${this.timeoutMS}ms` + ); const usingServerSelectionTimeoutMS = - this.serverSelectionTimeoutMS !== 0 && - csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; - + serverSelectionTimeoutMS !== 0 && + csotMin(remainingTimeMS, serverSelectionTimeoutMS) === serverSelectionTimeoutMS; if (usingServerSelectionTimeoutMS) { - this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + this._serverSelectionTimeout = Timeout.expires(serverSelectionTimeoutMS); } else { - if (this.timeoutMS > 0) { - this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + if (remainingTimeMS > 0 && Number.isFinite(remainingTimeMS)) { + this._serverSelectionTimeout = Timeout.expires(remainingTimeMS); } else { this._serverSelectionTimeout = null; } @@ -233,7 +238,10 @@ export class CSOTTimeoutContext extends TimeoutContext { } get connectionCheckoutTimeout(): Timeout | null { - if (typeof this._connectionCheckoutTimeout !== 'object') { + if ( + typeof this._connectionCheckoutTimeout !== 'object' || + this._connectionCheckoutTimeout?.cleared + ) { if (typeof this._serverSelectionTimeout === 'object') { // null or Timeout this._connectionCheckoutTimeout = this._serverSelectionTimeout; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index f73f162204f..e4c9eb3027c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -6,7 +6,9 @@ import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const enabled = [ 'override-collection-timeoutMS', 'override-database-timeoutMS', - 'override-operation-timeoutMS' + 'override-operation-timeoutMS', + 'retryability-legacy-timeouts', + 'retryability-timeoutMS' ]; const cursorOperations = [ @@ -18,6 +20,11 @@ const cursorOperations = [ 'listCollectionNames' ]; +const bulkWriteOperations = [ + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' +]; + describe('CSOT spec tests', function () { const specs = loadSpecTests(join('client-side-operations-timeout')); for (const spec of specs) { @@ -30,6 +37,10 @@ describe('CSOT spec tests', function () { // Cursor operation if (test.operations.find(operation => cursorOperations.includes(operation.name))) test.skipReason = 'TODO(NODE-5684): Not working yet'; + + if (bulkWriteOperations.includes(test.description)) + test.skipReason = + 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } runUnifiedSuite(specs); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 
c4989f58d7f..944d9b96048 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -7,7 +7,7 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; +import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -22,10 +22,16 @@ describe('CSOT spec unit tests', function () { it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); sinon.spy(Timeout, 'expires'); + const timeoutContextSpy = sinon.spy(TimeoutContext, 'create'); await client.db('db').collection('collection').insertOne({ x: 1 }); - expect(Timeout.expires).to.have.been.calledWith(10000); + const createCalls = timeoutContextSpy.getCalls().filter( + // @ts-expect-error accessing concrete field + call => call.args[0].timeoutMS === 10000 + ); + + expect(createCalls).to.have.length.greaterThanOrEqual(1); expect(Timeout.expires).to.not.have.been.calledWith(999999); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 0c97b910836..63e2d97dd90 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,6 +1,5 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; -import * as sinon from 'sinon'; import { type ClientSession, @@ -13,10 +12,6 @@ import { } from '../../mongodb'; describe('CSOT driver tests', () => { - afterEach(() => { - sinon.restore(); - }); - describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 5eb3af88759..ebcd2cfdd85 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -790,6 +790,8 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their + // errorResponse field if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { From 35a5eb4bf8e0b10bf1f1e12432c2ea5026e6bdc4 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 12 Aug 2024 16:46:06 -0400 Subject: [PATCH 077/136] feat(NODE-6312): add error transformation for server timeouts (#4192) --- src/cmap/connection.ts | 29 ++++ src/cmap/wire_protocol/responses.ts | 36 +++- .../node_csot.test.ts | 163 +++++++++++++++++- 3 files changed, 225 insertions(+), 3 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 010bcb8c897..ecc5ca9c0c7 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -16,6 +16,7 @@ import { } from '../constants'; import { MongoCompatibilityError, + MONGODB_ERROR_CODES, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, @@ -540,6 +541,11 @@ export class Connection extends TypedEventEmitter { } if (document.ok === 0) { + if 
(options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError((object ??= document.toObject(bsonOptions))) + }); + } throw new MongoServerError((object ??= document.toObject(bsonOptions))); } @@ -613,6 +619,29 @@ export class Connection extends TypedEventEmitter { ): Promise { this.throwIfAborted(); for await (const document of this.sendCommand(ns, command, options, responseType)) { + if (options.timeoutContext?.csotEnabled()) { + if (MongoDBResponse.is(document)) { + // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT + if (document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document.toObject()) + }); + } + } else { + if ( + (Array.isArray(document?.writeErrors) && + document.writeErrors.some( + error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + )) || + document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + ) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document) + }); + } + } + } + return document; } throw new MongoUnexpectedServerResponseError('Unable to get response from server'); diff --git a/src/cmap/wire_protocol/responses.ts b/src/cmap/wire_protocol/responses.ts index 18afde92e72..a56016cf578 100644 --- a/src/cmap/wire_protocol/responses.ts +++ b/src/cmap/wire_protocol/responses.ts @@ -11,7 +11,7 @@ import { pluckBSONSerializeOptions, type Timestamp } from '../../bson'; -import { MongoUnexpectedServerResponseError } from '../../error'; +import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error'; import { type ClusterTime } from '../../sdam/common'; import { decorateDecryptionResult, ns } from '../../utils'; import { @@ -111,6 +111,40 @@ export class MongoDBResponse extends OnDemandDocument { // {ok:1} static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0])); + /** + * Returns true iff: + * - ok is 0 and the top-level code === 50 + * - ok is 1 and the writeErrors array contains a code === 50 + * - ok is 1 and the writeConcern object contains a code === 50 + */ + get isMaxTimeExpiredError() { + // {ok: 0, code: 50 ... } + const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isTopLevel) return true; + + if (this.ok === 0) return false; + + // {ok: 1, writeConcernError: {code: 50 ... }} + const isWriteConcern = + this.get('writeConcernError', BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isWriteConcern) return true; + + const writeErrors = this.get('writeErrors', BSONType.array); + if (writeErrors?.size()) { + for (let i = 0; i < writeErrors.size(); i++) { + const isWriteError = + writeErrors.get(i, BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + + // {ok: 1, writeErrors: [{code: 50 ... }]} + if (isWriteError) return true; + } + } + + return false; + } + /** * Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the * contents of the document. 
diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 63e2d97dd90..d7d4a4ede5a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,17 +1,23 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; +import * as semver from 'semver'; +import * as sinon from 'sinon'; import { + BSON, type ClientSession, type Collection, + Connection, type Db, type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, - MongoOperationTimeoutError + MongoOperationTimeoutError, + MongoServerError } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', () => { +describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -161,4 +167,157 @@ describe('CSOT driver tests', () => { }); }); }); + + describe('server-side maxTimeMS errors are transformed', () => { + let client: MongoClient; + let commandsSucceeded; + let commandsFailed; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); + commandsSucceeded = []; + commandsFailed = []; + client.on('commandSucceeded', event => { + if (event.commandName === 'configureFailPoint') return; + commandsSucceeded.push(event); + }); + client.on('commandFailed', event => commandsFailed.push(event)); + }); + + afterEach(async function () { + await client + .db() + .collection('a') + .drop() + .catch(() => null); + await client.close(); + commandsSucceeded = undefined; + commandsFailed = undefined; + }); + + describe('when a maxTimeExpired error is returned at the top-level', () => { + // {ok: 0, code: 50, codeName: "MaxTimeMSExpired", errmsg: "operation time limit exceeded"} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['ping'], + errorCode: 50 + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command failed', async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { + // The server should always return one maxTimeExpiredError at the front of the writeErrors array + // But for the sake of defensive programming we will find any maxTime error in the array. 
+ + beforeEach(async () => { + const writeErrorsReply = BSON.serialize({ + ok: 1, + writeErrors: [ + { code: 2, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 3, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 4, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 50, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' } + ] + }); + const commandSpy = sinon.spy(Connection.prototype, 'command'); + const readManyStub = sinon + // @ts-expect-error: readMany is private + .stub(Connection.prototype, 'readMany') + .callsFake(async function* (...args) { + const realIterator = readManyStub.wrappedMethod.call(this, ...args); + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + }); + }); + + afterEach(() => sinon.restore()); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { + // {ok: 1, writeConcernError: {code: 50, codeName: "MaxTimeMSExpired"}} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + writeConcernError: { code: 50, errmsg: 'times up buster', errorLabels: [] } + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + }); + }); + }); }); From a9c922426ad8c34f8b8230cbfd0baf19b912f3e3 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 9 Sep 2024 11:11:11 -0400 Subject: [PATCH 078/136] feat(NODE-6313): add CSOT support to sessions and transactions (#4199) --- package-lock.json | 82 +++--- package.json | 2 +- src/cmap/connection.ts | 7 + src/cmap/wire_protocol/on_data.ts | 15 +- src/collection.ts | 12 +- src/db.ts | 22 +- src/error.ts | 3 + src/operations/execute_operation.ts | 8 +- src/sessions.ts | 255 ++++++++++++------ src/timeout.ts | 49 +++- src/transactions.ts | 7 +- src/utils.ts | 13 +- 
...ient_side_operations_timeout.prose.test.ts | 167 +++++++++++- ...lient_side_operations_timeout.spec.test.ts | 18 +- .../node_csot.test.ts | 150 +++++++++++ .../sessions-inherit-timeoutMS.json | 28 +- .../sessions-inherit-timeoutMS.yml | 19 +- ...sessions-override-operation-timeoutMS.json | 32 ++- .../sessions-override-operation-timeoutMS.yml | 23 +- .../sessions-override-timeoutMS.json | 28 +- .../sessions-override-timeoutMS.yml | 19 +- test/tools/unified-spec-runner/entities.ts | 4 + test/tools/unified-spec-runner/match.ts | 19 +- test/tools/unified-spec-runner/operations.ts | 27 +- 24 files changed, 776 insertions(+), 233 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2b07cd361d5..1d9cebf509b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6415,10 +6415,46 @@ "node": ">=10" } }, - "node_modules/mongodb": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.8.0.tgz", - "integrity": "sha512-HGQ9NWDle5WvwMnrvUxsFYPd3JEbqD3RgABHBQRuoCEND0qzhsd0iH5ypHsf1eJ+sXmvmyKpP+FLOKY8Il7jMw==", + "node_modules/mongodb-client-encryption": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", + "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "bindings": "^1.5.0", + "node-addon-api": "^4.3.0", + "prebuild-install": "^7.1.2" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-connection-string-url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", + "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", + "dependencies": { + "@types/whatwg-url": "^11.0.2", + "whatwg-url": "^13.0.0" + } + }, + "node_modules/mongodb-legacy": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", + "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "dev": true, + "dependencies": { + "mongodb": "^6.0.0" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-legacy/node_modules/mongodb": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.7.0.tgz", + "integrity": "sha512-TMKyHdtMcO0fYBNORiYdmM25ijsHs+Njs963r4Tro4OQZzqYigAzYQouwWRg4OIaiLRUEGUh/1UAcH5lxdSLIA==", "dev": true, "dependencies": { "@mongodb-js/saslprep": "^1.1.5", @@ -6461,42 +6497,6 @@ } } }, - "node_modules/mongodb-client-encryption": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", - "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "node-addon-api": "^4.3.0", - "prebuild-install": "^7.1.2" - }, - "engines": { - "node": ">=16.20.1" - } - }, - "node_modules/mongodb-connection-string-url": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", - "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", - "dependencies": { - "@types/whatwg-url": "^11.0.2", - "whatwg-url": "^13.0.0" - } - }, - "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", - "dev": true, - "dependencies": { - "mongodb": "^6.0.0" - }, - "engines": { - "node": ">=16.20.1" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", diff --git a/package.json b/package.json index 479356905dc..2de0e1811f0 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index ecc5ca9c0c7..7ad367e6733 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -745,6 +745,13 @@ export class Connection extends TypedEventEmitter { return; } } + } catch (readError) { + if (TimeoutError.is(readError)) { + throw new MongoOperationTimeoutError( + `Timed out during socket read (${readError.duration}ms)` + ); + } + throw readError; } finally { this.dataEvents = null; this.messageStream.pause(); diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index a32c6b1b484..23fd88e2828 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,7 +1,6 @@ import { type EventEmitter } from 'events'; -import { MongoOperationTimeoutError } from '../../error'; -import { type TimeoutContext, TimeoutError } from '../../timeout'; +import { type TimeoutContext } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -91,8 +90,11 @@ export function onData( // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + + const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; + timeoutForSocketRead?.throwIfExpired(); // eslint-disable-next-line github/no-then - timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); + timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -104,12 +106,9 @@ export function onData( function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - const timeoutError = TimeoutError.is(err) - ? new MongoOperationTimeoutError('Timed out during socket read') - : undefined; - if (promise != null) promise.reject(timeoutError ?? err); - else error = timeoutError ?? err; + if (promise != null) promise.reject(err); + else error = err; void closeHandler(); } diff --git a/src/collection.ts b/src/collection.ts index dbd91371cce..f3a206b0c7b 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -470,10 +470,14 @@ export class Collection { // Intentionally, we do not inherit options from parent for this operation. 
return await executeOperation( this.client, - new RenameOperation(this as TODO_NODE_3286, newName, { - ...options, - readPreference: ReadPreference.PRIMARY - }) as TODO_NODE_3286 + new RenameOperation( + this as TODO_NODE_3286, + newName, + resolveOptions(undefined, { + ...options, + readPreference: ReadPreference.PRIMARY + }) + ) as TODO_NODE_3286 ); } diff --git a/src/db.ts b/src/db.ts index 48501bc497e..bd0b5450b8c 100644 --- a/src/db.ts +++ b/src/db.ts @@ -275,12 +275,16 @@ export class Db { // Intentionally, we do not inherit options from parent for this operation. return await executeOperation( this.client, - new RunCommandOperation(this, command, { - ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS ?? this.timeoutMS, - session: options?.session, - readPreference: options?.readPreference - }) + new RunCommandOperation( + this, + command, + resolveOptions(undefined, { + ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS ?? this.timeoutMS, + session: options?.session, + readPreference: options?.readPreference + }) + ) ); } @@ -385,7 +389,11 @@ export class Db { new RenameOperation( this.collection(fromCollection) as TODO_NODE_3286, toCollection, - { ...options, new_collection: true, readPreference: ReadPreference.primary } + resolveOptions(undefined, { + ...options, + new_collection: true, + readPreference: ReadPreference.primary + }) ) as TODO_NODE_3286 ); } diff --git a/src/error.ts b/src/error.ts index f0441426feb..2b973cccc38 100644 --- a/src/error.ts +++ b/src/error.ts @@ -128,6 +128,9 @@ function isAggregateError(e: unknown): e is Error & { errors: Error[] } { * mongodb-client-encryption has a dependency on this error, it uses the constructor with a string argument */ export class MongoError extends Error { + get [Symbol.toStringTag]() { + return this.name; + } /** @internal */ [kErrorLabels]: Set; /** diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index cdddc1211a8..2523058ecfd 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -58,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -81,11 +81,6 @@ export async function executeOperation< } else if (session.client !== client) { throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient'); } - if (session.explicit && session?.timeoutMS != null && operation.options.timeoutMS != null) { - throw new MongoInvalidArgumentError( - 'Do not specify timeoutMS on operation if already specified on an explicit session' - ); - } const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -107,6 +102,7 @@ export async function executeOperation< } timeoutContext ??= TimeoutContext.create({ + session, serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, timeoutMS: operation.options.timeoutMS diff --git a/src/sessions.ts b/src/sessions.ts index bad966ed71c..bbd1785275f 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -29,6 +29,7 @@ import { ReadConcernLevel } from './read_concern'; import { ReadPreference } from './read_preference'; import { type AsyncDisposable, configureResourceManagement } from './resource_management'; import { _advanceClusterTime, type ClusterTime, TopologyType } from './sdam/common'; +import { TimeoutContext } from './timeout'; import { isTransactionCommand, Transaction, @@ -58,8 +59,11 @@ export interface ClientSessionOptions { snapshot?: boolean; /** The default TransactionOptions to use for transactions started on this session. */ defaultTransactionOptions?: TransactionOptions; - /** @internal - * The value of timeoutMS used for CSOT. Used to override client timeoutMS */ + /** + * @public + * An overriding timeoutMS value to use for a client-side timeout. + * If not provided the session uses the timeoutMS specified on the MongoClient. + */ defaultTimeoutMS?: number; /** @internal */ @@ -98,6 +102,9 @@ export interface EndSessionOptions { error?: AnyError; force?: boolean; forceClear?: boolean; + + /** @internal */ + timeoutMS?: number; } /** @@ -115,7 +122,7 @@ export class ClientSession /** @internal */ sessionPool: ServerSessionPool; hasEnded: boolean; - clientOptions?: MongoOptions; + clientOptions: MongoOptions; supports: { causalConsistency: boolean }; clusterTime?: ClusterTime; operationTime?: Timestamp; @@ -137,6 +144,9 @@ export class ClientSession /** @internal */ timeoutMS?: number; + /** @internal */ + public timeoutContext: TimeoutContext | null = null; + /** * Create a client session. * @internal @@ -149,7 +159,7 @@ export class ClientSession client: MongoClient, sessionPool: ServerSessionPool, options: ClientSessionOptions, - clientOptions?: MongoOptions + clientOptions: MongoOptions ) { super(); @@ -269,8 +279,13 @@ export class ClientSession async endSession(options?: EndSessionOptions): Promise { try { if (this.inTransaction()) { - await this.abortTransaction(); + await this.abortTransaction({ ...options, throwTimeout: true }); } + } catch (error) { + // spec indicates that we should ignore all errors for `endSessions` + if (error.name === 'MongoOperationTimeoutError') throw error; + squashError(error); + } finally { if (!this.hasEnded) { const serverSession = this[kServerSession]; if (serverSession != null) { @@ -286,10 +301,6 @@ export class ClientSession this.hasEnded = true; this.emit('ended', this); } - } catch (error) { - // spec indicates that we should ignore all errors for `endSessions` - squashError(error); - } finally { maybeClearPinnedConnection(this, { force: true, ...options }); } } @@ -441,8 +452,10 @@ export class ClientSession /** * Commits the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. 
*/ - async commitTransaction(): Promise { + async commitTransaction(options?: { timeoutMS?: number }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -492,8 +505,25 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + this.timeoutContext ?? + (typeof timeoutMS === 'number' + ? TimeoutContext.create({ + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS, + timeoutMS + }) + : null); + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (firstCommitError) { if (firstCommitError instanceof MongoError && isRetryableWriteError(firstCommitError)) { @@ -503,7 +533,7 @@ export class ClientSession this.unpin({ force: true }); try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (retryCommitError) { // If the retry failed, we process that error instead of the original @@ -535,8 +565,13 @@ export class ClientSession /** * Aborts the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. */ - async abortTransaction(): Promise { + async abortTransaction(options?: { timeoutMS?: number }): Promise; + /** @internal */ + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise; + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -581,18 +616,45 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : this.timeoutContext?.csotEnabled() + ? this.timeoutContext.timeoutMS // refresh timeoutMS for abort operation + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + timeoutMS != null + ? 
TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); this.unpin(); return; } catch (firstAbortError) { this.unpin(); + if (firstAbortError.name === 'MongoRuntimeError') throw firstAbortError; + if (options?.throwTimeout && firstAbortError.name === 'MongoOperationTimeoutError') { + throw firstAbortError; + } + if (firstAbortError instanceof MongoError && isRetryableWriteError(firstAbortError)) { try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; - } catch { + } catch (secondAbortError) { + if (secondAbortError.name === 'MongoRuntimeError') throw secondAbortError; + if (options?.throwTimeout && secondAbortError.name === 'MongoOperationTimeoutError') { + throw secondAbortError; + } // we do not retry the retry } } @@ -647,96 +709,119 @@ export class ClientSession */ async withTransaction( fn: WithTransactionCallback, - options?: TransactionOptions + options?: TransactionOptions & { + /** + * Configures a timeoutMS expiry for the entire withTransactionCallback. + * + * @remarks + * - The remaining timeout will not be applied to callback operations that do not use the ClientSession. + * - Overriding timeoutMS for operations executed using the explicit session inside the provided callback will result in a client-side error. + */ + timeoutMS?: number; + } ): Promise { const MAX_TIMEOUT = 120000; - const startTime = now(); - - let committed = false; - let result: any; - while (!committed) { - this.startTransaction(options); // may throw on error + const timeoutMS = options?.timeoutMS ?? this.timeoutMS ?? null; + this.timeoutContext = + timeoutMS != null + ? TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; - try { - const promise = fn(this); - if (!isPromiseLike(promise)) { - throw new MongoInvalidArgumentError( - 'Function provided to `withTransaction` must return a Promise' - ); - } + const startTime = this.timeoutContext?.csotEnabled() ? 
this.timeoutContext.start : now(); - result = await promise; + let committed = false; + let result: any; - if ( - this.transaction.state === TxnState.NO_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_COMMITTED || - this.transaction.state === TxnState.TRANSACTION_ABORTED - ) { - // Assume callback intentionally ended the transaction - return result; - } - } catch (fnError) { - if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { - await this.abortTransaction(); - throw fnError; - } + try { + while (!committed) { + this.startTransaction(options); // may throw on error - if ( - this.transaction.state === TxnState.STARTING_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS - ) { - await this.abortTransaction(); - } + try { + const promise = fn(this); + if (!isPromiseLike(promise)) { + throw new MongoInvalidArgumentError( + 'Function provided to `withTransaction` must return a Promise' + ); + } - if ( - fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT - ) { - continue; - } + result = await promise; - throw fnError; - } + if ( + this.transaction.state === TxnState.NO_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_COMMITTED || + this.transaction.state === TxnState.TRANSACTION_ABORTED + ) { + // Assume callback intentionally ended the transaction + return result; + } + } catch (fnError) { + if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { + await this.abortTransaction(); + throw fnError; + } - while (!committed) { - try { - /* - * We will rely on ClientSession.commitTransaction() to - * apply a majority write concern if commitTransaction is - * being retried (see: DRIVERS-601) - */ - await this.commitTransaction(); - committed = true; - } catch (commitError) { - /* - * Note: a maxTimeMS error will have the MaxTimeMSExpired - * code (50) and can be reported as a top-level error or - * inside writeConcernError, ex. - * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } - * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } - */ if ( - !isMaxTimeMSExpiredError(commitError) && - commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && - now() - startTime < MAX_TIMEOUT + this.transaction.state === TxnState.STARTING_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS ) { - continue; + await this.abortTransaction(); } if ( - commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT + fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) ) { - break; + continue; } - throw commitError; + throw fnError; + } + + while (!committed) { + try { + /* + * We will rely on ClientSession.commitTransaction() to + * apply a majority write concern if commitTransaction is + * being retried (see: DRIVERS-601) + */ + await this.commitTransaction(); + committed = true; + } catch (commitError) { + /* + * Note: a maxTimeMS error will have the MaxTimeMSExpired + * code (50) and can be reported as a top-level error or + * inside writeConcernError, ex. 
+ * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } + * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } + */ + if ( + !isMaxTimeMSExpiredError(commitError) && + commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + continue; + } + + if ( + commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + break; + } + + throw commitError; + } } } + return result; + } finally { + this.timeoutContext = null; } - - return result; } } diff --git a/src/timeout.ts b/src/timeout.ts index 297a484b4ec..f057bdb90b4 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,16 +1,19 @@ import { clearTimeout, setTimeout } from 'timers'; import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; +import { type ClientSession } from './sessions'; import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { + duration: number; override get name(): 'TimeoutError' { return 'TimeoutError'; } - constructor(message: string, options?: { cause?: Error }) { + constructor(message: string, options: { cause?: Error; duration: number }) { super(message, options); + this.duration = options.duration; } static is(error: unknown): error is TimeoutError { @@ -52,12 +55,19 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = true) { - let reject!: Reject; + private constructor( + executor: Executor = () => null, + options?: { duration: number; unref?: true; rejection?: Error } + ) { + const duration = options?.duration ?? 
0; + const unref = !!options?.unref; + const rejection = options?.rejection; + if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } + let reject!: Reject; super((_, promiseReject) => { reject = promiseReject; @@ -67,16 +77,20 @@ export class Timeout extends Promise { this.duration = duration; this.start = Math.trunc(performance.now()); - if (this.duration > 0) { + if (rejection == null && this.duration > 0) { this.id = setTimeout(() => { this.ended = Math.trunc(performance.now()); this.timedOut = true; - reject(new TimeoutError(`Expired after ${duration}ms`)); + reject(new TimeoutError(`Expired after ${duration}ms`, { duration })); }, this.duration); if (typeof this.id.unref === 'function' && unref) { // Ensure we do not keep the Node.js event loop running this.id.unref(); } + } else if (rejection != null) { + this.ended = Math.trunc(performance.now()); + this.timedOut = true; + reject(rejection); } } @@ -90,11 +104,11 @@ export class Timeout extends Promise { } throwIfExpired(): void { - if (this.timedOut) throw new TimeoutError('Timed out'); + if (this.timedOut) throw new TimeoutError('Timed out', { duration: this.duration }); } - public static expires(durationMS: number, unref?: boolean): Timeout { - return new Timeout(undefined, durationMS, unref); + public static expires(duration: number, unref?: true): Timeout { + return new Timeout(undefined, { duration, unref }); } static is(timeout: unknown): timeout is Timeout { @@ -107,10 +121,16 @@ export class Timeout extends Promise { typeof timeout.then === 'function' ); } + + static override reject(rejection?: Error): Timeout { + return new Timeout(undefined, { duration: 0, unref: true, rejection }); + } } /** @internal */ -export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; +export type TimeoutContextOptions = (LegacyTimeoutContextOptions | CSOTTimeoutContextOptions) & { + session?: ClientSession; +}; /** @internal */ export type LegacyTimeoutContextOptions = { @@ -151,6 +171,7 @@ function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions /** @internal */ export abstract class TimeoutContext { static create(options: TimeoutContextOptions): TimeoutContext { + if (options.session?.timeoutContext != null) return options.session?.timeoutContext; if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); else throw new MongoRuntimeError('Unrecognized options'); @@ -183,7 +204,7 @@ export class CSOTTimeoutContext extends TimeoutContext { private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; public minRoundTripTime = 0; - private start: number; + public start: number; constructor(options: CSOTTimeoutContextOptions) { super(); @@ -217,8 +238,8 @@ export class CSOTTimeoutContext extends TimeoutContext { if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { const { remainingTimeMS, serverSelectionTimeoutMS } = this; if (remainingTimeMS <= 0) - throw new MongoOperationTimeoutError( - `Timed out in server selection after ${this.timeoutMS}ms` + return Timeout.reject( + new MongoOperationTimeoutError(`Timed out in server selection after ${this.timeoutMS}ms`) ); const usingServerSelectionTimeoutMS = serverSelectionTimeoutMS !== 0 && @@ -258,14 +279,14 @@ export class CSOTTimeoutContext extends TimeoutContext { const { remainingTimeMS 
} = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket write'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket write')); } get timeoutForSocketRead(): Timeout | null { const { remainingTimeMS } = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket read'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } } diff --git a/src/transactions.ts b/src/transactions.ts index 53dcb842084..db251c82c16 100644 --- a/src/transactions.ts +++ b/src/transactions.ts @@ -60,7 +60,7 @@ const COMMITTED_STATES: Set = new Set([ * Configuration options for a transaction. * @public */ -export interface TransactionOptions extends CommandOperationOptions { +export interface TransactionOptions extends Omit { // TODO(NODE-3344): These options use the proper class forms of these settings, it should accept the basic enum values too /** A default read concern for commands in this transaction */ readConcern?: ReadConcernLike; @@ -68,7 +68,10 @@ export interface TransactionOptions extends CommandOperationOptions { writeConcern?: WriteConcern; /** A default read preference for commands in this transaction */ readPreference?: ReadPreferenceLike; - /** Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds */ + /** + * Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds + * @deprecated This option is deprecated in favor of `timeoutMS` or `defaultTimeoutMS`. + */ maxCommitTimeMS?: number; } diff --git a/src/utils.ts b/src/utils.ts index ebc0784cb1f..04174813c9c 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -517,6 +517,10 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. + * + * @param parent - An optional owning class of the operation being run. ex. Db/Collection/MongoClient. + * @param options - The options passed to the operation method. + * * @internal */ export function resolveOptions( @@ -544,9 +548,14 @@ export function resolveOptions( result.readPreference = readPreference; } - const timeoutMS = options?.timeoutMS; + const isConvenientTransaction = session?.explicit && session?.timeoutContext != null; + if (isConvenientTransaction && options?.timeoutMS != null) { + throw new MongoInvalidArgumentError( + 'An operation cannot be given a timeoutMS setting when inside a withTransaction call that has a timeoutMS setting' + ); + } - result.timeoutMS = timeoutMS ?? parent?.timeoutMS; + result.timeoutMS = options?.timeoutMS ?? 
parent?.timeoutMS; return result; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 729bed42199..406aa53ed6a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,6 +1,7 @@ /* Specification prose tests */ import { expect } from 'chai'; +import * as semver from 'semver'; import * as sinon from 'sinon'; import { @@ -9,6 +10,7 @@ import { MongoServerSelectionError, now } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -595,7 +597,10 @@ describe('CSOT spec prose tests', function () { 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context.skip('9. endSession', () => { + describe('9. endSession', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4', topology: ['replicaset', 'sharded'] } + }; /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -625,12 +630,92 @@ describe('CSOT spec prose tests', function () { * 1. Using `session`, execute `session.end_session` * - Expect this to fail with a timeout error after no more than 15ms. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + const internalClient = this.configuration.newClient(); + // End in-progress transactions otherwise "drop" will hang + await internalClient.db('admin').command({ killAllSessions: [] }); + await internalClient + .db('endSession_db') + .collection('endSession_coll') + .drop() + .catch(() => null); + await internalClient.db('endSession_db').createCollection('endSession_coll'); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + const internalClient = this.configuration.newClient(); + await internalClient.db('admin').command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client?.close(); + }); + + describe('when timeoutMS is provided to the client', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient({ timeoutMS: 150, monitorCommands: true }); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when defaultTimeoutMS is provided to startSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = 
client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession({ defaultTimeoutMS: 150 }); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when timeoutMS is provided to endSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession({ timeoutMS: 150 }).catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); }); - context.skip('10. Convenient Transactions', () => { + describe('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=4.4' } + }; - context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { + describe('when an operation fails inside withTransaction callback', () => { /** * 1. Using `internalClient`, drop the `db.coll` collection. * 1. Using `internalClient`, set the following fail point: @@ -641,7 +726,7 @@ describe('CSOT spec prose tests', function () { * data: { * failCommands: ["insert", "abortTransaction"], * blockConnection: true, - * blockTimeMS: 15 + * blockTimeMS: 200 * } * } * ``` @@ -658,6 +743,80 @@ describe('CSOT spec prose tests', function () { * 1. `command_started` and `command_failed` events for an `insert` command. * 1. `command_started` and `command_failed` events for an `abortTransaction` command. 
*/ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it('timeoutMS is refreshed for abortTransaction', metadata, async function () { + if ( + this.configuration.topologyType === 'ReplicaSetWithPrimary' && + semver.satisfies(this.configuration.version, '<=4.4') + ) { + this.skipReason = '4.4 replicaset fail point does not blockConnection for requested time'; + this.skip(); + } + + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 150, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + const withTransactionError = await session + .withTransaction(async session => { + await coll.insertOne({ x: 1 }, { session }); + }) + .catch(error => error); + + try { + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal(['insert', 'abortTransaction']); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + }); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index e4c9eb3027c..a178cecc5d2 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -1,4 +1,5 @@ import { join } from 'path'; +import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; @@ -8,7 +9,10 @@ const enabled = [ 'override-database-timeoutMS', 'override-operation-timeoutMS', 'retryability-legacy-timeouts', - 'retryability-timeoutMS' + 'retryability-timeoutMS', + 'sessions-override-operation-timeoutMS', + 'sessions-override-timeoutMS', + 'sessions-inherit-timeoutMS' ]; const cursorOperations = [ @@ -43,5 +47,15 @@ describe('CSOT spec tests', function () { 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } - runUnifiedSuite(specs); + runUnifiedSuite(specs, (test, configuration) => { + const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + sessionCSOTTests.includes(test.description) && + configuration.topologyType === 
'ReplicaSetWithPrimary' && + semver.satisfies(configuration.version, '<=4.4') + ) { + return '4.4 replicaset fail point does not blockConnection for requested time'; + } + return false; + }); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index d7d4a4ede5a..cc767c1d80a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -12,6 +12,7 @@ import { type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, + MongoInvalidArgumentError, MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; @@ -320,4 +321,153 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { }); }); }); + + describe('when using an explicit session', () => { + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset'], mongodb: '>=4.4' } + }; + + describe('created for a withTransaction callback', () => { + describe('passing a timeoutMS and a session with a timeoutContext', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('throws a validation error from the operation', metadata, async () => { + // Drivers MUST raise a validation error if an explicit session with a timeout is used and + // the timeoutMS option is set at the operation level for operations executed as part of a withTransaction callback. + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll + .insertOne({ x: 1 }, { session, timeoutMS: 1234 }) + .catch(error => error); + throw insertError; + }) + .catch(error => error); + + expect(insertError).to.be.instanceOf(MongoInvalidArgumentError); + expect(withTransactionError).to.be.instanceOf(MongoInvalidArgumentError); + }); + }); + }); + + describe('created manually', () => { + describe('passing a timeoutMS and a session with an inherited timeoutMS', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('does not throw a validation error', metadata, async () => { + const coll = client.db('db').collection('coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session, timeoutMS: 1234 }); + await session.abortTransaction(); // this uses the inherited timeoutMS, not the insert + }); + }); + }); + }); + + describe('Convenient Transactions', () => { + /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. 
*/ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=5.0' } + }; + + describe('when an operation fails inside withTransaction callback', () => { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 600 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it( + 'timeoutMS is refreshed for abortTransaction and the timeout error is thrown from the operation', + metadata, + async function () { + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 500, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll.insertOne({ x: 1 }, { session }).catch(error => error); + throw insertError; + }) + .catch(error => error); + + try { + expect(insertError).to.be.instanceOf(MongoOperationTimeoutError); + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal([ + 'insert', + 'abortTransaction' + ]); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + } + ); + }); + }); }); diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json index abbc3217327..13ea91c7948 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } 
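For orientation, here is a usage sketch of the session-level timeout options this commit wires up: defaultTimeoutMS on startSession and the timeoutMS override accepted by withTransaction (and by commitTransaction/abortTransaction). It is not part of the patch; the URI, database and collection names are placeholders, and it assumes a reachable replica set since transactions require one.

import { MongoClient, MongoOperationTimeoutError } from 'mongodb';

async function run(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017', { timeoutMS: 10_000 });
  // defaultTimeoutMS overrides the client-level timeoutMS for operations run on this session.
  const session = client.startSession({ defaultTimeoutMS: 500 });
  try {
    await session.withTransaction(
      async session => {
        // Operations that use this session inside the callback must not set their own
        // timeoutMS; the resolveOptions change above turns that into a validation error.
        await client.db('db').collection('coll').insertOne({ x: 1 }, { session });
      },
      { timeoutMS: 500 } // one budget shared by the callback, commitTransaction and abortTransaction
    );
  } catch (error) {
    if (error instanceof MongoOperationTimeoutError) {
      // The 500ms budget expired, possibly while the driver ran abortTransaction,
      // which is given a refreshed timeoutMS so the server is not left with an open transaction.
    }
    throw error;
  } finally {
    await session.endSession();
    await client.close();
  }
}

Note the behavior exercised by the prose and spec tests above: when the callback fails, abortTransaction runs with a refreshed timeoutMS rather than the leftover budget, and a timeout raised there surfaces from withTransaction as a MongoOperationTimeoutError.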
diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml index 184ef7eb9e7..c79384e5f0b 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -52,7 +52,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json index 0254b184a14..441c698328c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml index 8a80a65720a..bee91dc4cb8 100644 --- 
a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml @@ -50,7 +50,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -61,7 +61,7 @@ tests: - name: commitTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectError: isTimeoutError: true expectEvents: @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -106,7 +106,7 @@ tests: - name: abortTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectEvents: - client: *client events: @@ -138,11 +138,11 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 callback: - name: insertOne object: *collection @@ -156,9 +156,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -169,3 +166,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json index c46ae4dd506..d90152e909c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml index 61aaab4d97e..73aaf9ff2a7 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml @@ -29,7 +29,7 @@ createEntities: id: &session session client: *client sessionOptions: - defaultTimeoutMS: 50 + defaultTimeoutMS: 500 initialData: - collectionName: *collectionName @@ -52,7 +52,7 @@ 
tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 9f4e20a828e..7f90e275dc8 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -619,6 +619,10 @@ export class EntitiesMap extends Map { const options = Object.create(null); + if (entity.session.sessionOptions?.defaultTimeoutMS != null) { + options.defaultTimeoutMS = entity.session.sessionOptions?.defaultTimeoutMS; + } + if (entity.session.sessionOptions?.causalConsistency) { options.causalConsistency = entity.session.sessionOptions?.causalConsistency; } diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index ebcd2cfdd85..662746b4591 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -513,6 +513,13 @@ function compareCommandFailedEvents( } } +function expectInstanceOf any>( + instance: any, + ctor: T +): asserts instance is InstanceType { + expect(instance).to.be.instanceOf(ctor); +} + function compareEvents( actual: CommandEvent[] | CmapEvent[] | SdamEvent[], expected: (ExpectedCommandEvent & ExpectedCmapEvent & ExpectedSdamEvent)[], @@ -527,9 +534,7 @@ function compareEvents( if (expectedEvent.commandStartedEvent) { const path = `${rootPrefix}.commandStartedEvent`; - if (!(actualEvent instanceof CommandStartedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandStartedEvent`); - } + expectInstanceOf(actualEvent, CommandStartedEvent); compareCommandStartedEvents(actualEvent, expectedEvent.commandStartedEvent, entities, path); if (expectedEvent.commandStartedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); @@ -538,9 +543,7 @@ function compareEvents( } } else if (expectedEvent.commandSucceededEvent) { const path = `${rootPrefix}.commandSucceededEvent`; - if (!(actualEvent instanceof CommandSucceededEvent)) { - expect.fail(`expected ${path} to be instanceof CommandSucceededEvent`); - } + expectInstanceOf(actualEvent, CommandSucceededEvent); compareCommandSucceededEvents( actualEvent, expectedEvent.commandSucceededEvent, @@ -554,9 +557,7 @@ function compareEvents( } } else if (expectedEvent.commandFailedEvent) { const path = `${rootPrefix}.commandFailedEvent`; - if 
(!(actualEvent instanceof CommandFailedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandFailedEvent`); - } + expectInstanceOf(actualEvent, CommandFailedEvent); compareCommandFailedEvents(actualEvent, expectedEvent.commandFailedEvent, entities, path); if (expectedEvent.commandFailedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 7a98c7ac978..5b5b7040698 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -19,6 +19,7 @@ import { ServerType, type TopologyDescription, type TopologyType, + type TransactionOptions, WriteConcern } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -49,11 +50,6 @@ operations.set('createEntities', async ({ entities, operation, testConfig }) => await EntitiesMap.createEntities(testConfig, null, operation.arguments.entities!, entities); }); -operations.set('abortTransaction', async ({ entities, operation }) => { - const session = entities.getEntity('session', operation.object); - return session.abortTransaction(); -}); - operations.set('aggregate', async ({ entities, operation }) => { const dbOrCollection = entities.get(operation.object) as Db | Collection; if (!(dbOrCollection instanceof Db || dbOrCollection instanceof Collection)) { @@ -241,7 +237,12 @@ operations.set('close', async ({ entities, operation }) => { operations.set('commitTransaction', async ({ entities, operation }) => { const session = entities.getEntity('session', operation.object); - return session.commitTransaction(); + return await session.commitTransaction({ timeoutMS: operation.arguments?.timeoutMS }); +}); + +operations.set('abortTransaction', async ({ entities, operation }) => { + const session = entities.getEntity('session', operation.object); + return await session.abortTransaction({ timeoutMS: operation.arguments?.timeoutMS }); }); operations.set('createChangeStream', async ({ entities, operation }) => { @@ -371,7 +372,7 @@ operations.set('insertOne', async ({ entities, operation }) => { // Looping exposes the fact that we can generate _ids for inserted // documents and we don't want the original operation to get modified // and use the same _id for each insert. 
- return collection.insertOne({ ...document }, opts); + return await collection.insertOne({ ...document }, opts); }); operations.set('insertMany', async ({ entities, operation }) => { @@ -718,13 +719,17 @@ operations.set('waitForThread', async ({ entities, operation }) => { operations.set('withTransaction', async ({ entities, operation, client, testConfig }) => { const session = entities.getEntity('session', operation.object); - const options = { + const options: TransactionOptions = { readConcern: ReadConcern.fromOptions(operation.arguments), writeConcern: WriteConcern.fromOptions(operation.arguments), readPreference: ReadPreference.fromOptions(operation.arguments), - maxCommitTimeMS: operation.arguments!.maxCommitTimeMS + maxCommitTimeMS: operation.arguments?.maxCommitTimeMS }; + if (typeof operation.arguments?.timeoutMS === 'number') { + options.timeoutMS = operation.arguments.timeoutMS; + } + await session.withTransaction(async () => { for (const callbackOperation of operation.arguments!.callback) { await executeOperationAndCheck(callbackOperation, entities, client, testConfig, true); @@ -945,7 +950,7 @@ export async function executeOperationAndCheck( rethrow = false ): Promise { const opFunc = operations.get(operation.name); - expect(opFunc, `Unknown operation: ${operation.name}`).to.exist; + if (opFunc == null) expect.fail(`Unknown operation: ${operation.name}`); if (operation.arguments && operation.arguments.session) { // The session could need to be either pulled from the entity map or in the case where @@ -959,7 +964,7 @@ export async function executeOperationAndCheck( let result; try { - result = await opFunc!({ entities, operation, client, testConfig }); + result = await opFunc({ entities, operation, client, testConfig }); } catch (error) { if (operation.expectError) { expectErrorCheck(error, operation.expectError, entities); From 2b9ef6fa9d354e949e02040dbc0868da96b8760e Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 11:35:27 -0400 Subject: [PATCH 079/136] feat(NODE-6304): add CSOT support for non-tailable cursors (#4195) --- src/cmap/connection.ts | 4 +- src/cmap/wire_protocol/on_data.ts | 1 - src/collection.ts | 6 +- src/cursor/abstract_cursor.ts | 146 +++- src/cursor/aggregation_cursor.ts | 20 +- src/cursor/change_stream_cursor.ts | 6 +- src/cursor/find_cursor.ts | 2 +- src/cursor/list_collections_cursor.ts | 2 +- src/cursor/list_indexes_cursor.ts | 2 +- src/cursor/run_command_cursor.ts | 14 +- src/index.ts | 2 +- src/operations/aggregate.ts | 4 + src/operations/execute_operation.ts | 3 +- src/operations/find.ts | 4 + src/operations/indexes.ts | 9 +- src/operations/list_collections.ts | 3 + src/operations/operation.ts | 3 + src/operations/run_command.ts | 2 + src/sessions.ts | 12 +- src/timeout.ts | 27 +- ...ient_side_operations_timeout.prose.test.ts | 84 ++- ...lient_side_operations_timeout.spec.test.ts | 83 ++- .../node_csot.test.ts | 335 ++++++++- .../command-execution.json | 153 ++++ .../client-side-operations-timeout/README.md | 661 ++++++++++++++++++ .../change-streams.json | 20 +- .../change-streams.yml | 30 +- .../close-cursors.json | 12 +- .../close-cursors.yml | 12 +- .../command-execution.json | 2 +- .../command-execution.yml | 5 +- .../convenient-transactions.json | 22 +- .../convenient-transactions.yml | 15 +- .../deprecated-options.json | 2 +- .../deprecated-options.yml | 2 +- .../gridfs-advanced.yml | 2 +- .../non-tailable-cursors.json | 20 +- .../non-tailable-cursors.yml | 32 +- .../retryability-timeoutMS.json | 250 +++++++ 
.../retryability-timeoutMS.yml | 100 +++ .../tailable-awaitData.json | 14 +- .../tailable-awaitData.yml | 18 +- .../tailable-non-awaitData.json | 10 +- .../tailable-non-awaitData.yml | 12 +- test/tools/unified-spec-runner/operations.ts | 7 +- test/unit/cursor/aggregation_cursor.test.ts | 67 +- 46 files changed, 2008 insertions(+), 234 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json create mode 100644 test/spec/client-side-operations-timeout/README.md diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 7ad367e6733..507615e9f03 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -86,6 +86,7 @@ export interface CommandOptions extends BSONSerializeOptions { documentsReturnedIn?: string; noResponse?: boolean; omitReadPreference?: boolean; + omitMaxTimeMS?: boolean; // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint // from executeOperation that the txnNum should be applied to this command. @@ -421,7 +422,7 @@ export class Connection extends TypedEventEmitter { ...options }; - if (options.timeoutContext?.csotEnabled()) { + if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { const { maxTimeMS } = options.timeoutContext; if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } @@ -621,7 +622,6 @@ export class Connection extends TypedEventEmitter { for await (const document of this.sendCommand(ns, command, options, responseType)) { if (options.timeoutContext?.csotEnabled()) { if (MongoDBResponse.is(document)) { - // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT if (document.isMaxTimeExpiredError) { throw new MongoOperationTimeoutError('Server reported a timeout error', { cause: new MongoServerError(document.toObject()) diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 23fd88e2828..64c636f41f1 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -93,7 +93,6 @@ export function onData( const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; timeoutForSocketRead?.throwIfExpired(); - // eslint-disable-next-line github/no-then timeoutForSocketRead?.then(undefined, errorHandler); return iterator; diff --git a/src/collection.ts b/src/collection.ts index f3a206b0c7b..a73a5276f5f 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -11,7 +11,7 @@ import { type ListSearchIndexesOptions } from './cursor/list_search_indexes_cursor'; import type { Db } from './db'; -import { MongoInvalidArgumentError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError } from './error'; import type { MongoClient, PkFactory } from './mongo_client'; import type { Filter, @@ -678,7 +678,9 @@ export class Collection { new DropIndexOperation(this as TODO_NODE_3286, '*', resolveOptions(this, options)) ); return true; - } catch { + } catch (error) { + if (error instanceof MongoOperationTimeoutError) throw error; // TODO: Check the spec for index management behaviour/file a drivers ticket for this + // Seems like we should throw all errors return false; } } diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 51206b51a27..d0f386923ad 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,6 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from 
'../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; +import { TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -60,6 +61,17 @@ export interface CursorStreamOptions { /** @public */ export type CursorFlag = (typeof CURSOR_FLAGS)[number]; +/** @public*/ +export const CursorTimeoutMode = Object.freeze({ + ITERATION: 'iteration', + LIFETIME: 'cursorLifetime' +} as const); + +/** @public + * TODO(NODE-5688): Document and release + * */ +export type CursorTimeoutMode = (typeof CursorTimeoutMode)[keyof typeof CursorTimeoutMode]; + /** @public */ export interface AbstractCursorOptions extends BSONSerializeOptions { session?: ClientSession; @@ -105,6 +117,8 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { noCursorTimeout?: boolean; /** @internal TODO(NODE-5688): make this public */ timeoutMS?: number; + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ @@ -117,6 +131,8 @@ export type InternalAbstractCursorOptions = Omit { - await this.cleanup(); + async close(options?: { timeoutMS?: number }): Promise { + await this.cleanup(options?.timeoutMS); } /** @@ -658,6 +727,8 @@ export abstract class AbstractCursor< this.cursorId = null; this.documents?.clear(); + this.timeoutContext?.clear(); + this.timeoutContext = undefined; this.isClosed = false; this.isKilled = false; this.initialized = false; @@ -707,7 +778,7 @@ export abstract class AbstractCursor< } ); - return await executeOperation(this.cursorClient, getMoreOperation); + return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); } /** @@ -718,6 +789,12 @@ export abstract class AbstractCursor< * a significant refactor. 
*/ private async cursorInit(): Promise { + if (this.cursorOptions.timeoutMS != null) { + this.timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }); + } try { const state = await this._initialize(this.cursorSession); const response = state.response; @@ -729,7 +806,7 @@ export abstract class AbstractCursor< } catch (error) { // the cursor is now initialized, even if an error occurred this.initialized = true; - await this.cleanup(error); + await this.cleanup(undefined, error); throw error; } @@ -763,6 +840,7 @@ export abstract class AbstractCursor< // otherwise need to call getMore const batchSize = this.cursorOptions.batchSize || 1000; + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; try { const response = await this.getMore(batchSize); @@ -770,7 +848,7 @@ export abstract class AbstractCursor< this.documents = response; } catch (error) { try { - await this.cleanup(error); + await this.cleanup(undefined, error); } catch (error) { // `cleanupCursor` should never throw, squash and throw the original error squashError(error); @@ -791,7 +869,7 @@ export abstract class AbstractCursor< } /** @internal */ - private async cleanup(error?: Error) { + private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; try { @@ -806,11 +884,23 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; + let timeoutContext: TimeoutContext | undefined; + if (timeoutMS != null) { + this.timeoutContext?.clear(); + timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }); + } else { + this.timeoutContext?.refresh(); + timeoutContext = this.timeoutContext; + } await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session - }) + }), + timeoutContext ); } } catch (error) { diff --git a/src/cursor/aggregation_cursor.ts b/src/cursor/aggregation_cursor.ts index 9762c8a03bf..056f28454ce 100644 --- a/src/cursor/aggregation_cursor.ts +++ b/src/cursor/aggregation_cursor.ts @@ -1,4 +1,5 @@ import type { Document } from '../bson'; +import { MongoAPIError } from '../error'; import type { ExplainCommandOptions, ExplainVerbosityLike } from '../explain'; import type { MongoClient } from '../mongo_client'; import { AggregateOperation, type AggregateOptions } from '../operations/aggregate'; @@ -9,6 +10,7 @@ import { mergeOptions, type MongoDBNamespace } from '../utils'; import { AbstractCursor, type AbstractCursorOptions, + CursorTimeoutMode, type InitialCursorResponse } from './abstract_cursor'; @@ -38,6 +40,15 @@ export class AggregationCursor extends AbstractCursor { this.pipeline = pipeline; this.aggregateOptions = options; + + const lastStage: Document | undefined = this.pipeline[this.pipeline.length - 1]; + + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (lastStage?.$merge != null || lastStage?.$out != null) + ) + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); } clone(): AggregationCursor { @@ -60,7 +71,7 @@ export class AggregationCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, aggregateOperation); + const response = await executeOperation(this.client, aggregateOperation, 
this.timeoutContext); return { server: aggregateOperation.server, session, response }; } @@ -95,6 +106,13 @@ export class AggregationCursor extends AbstractCursor { addStage(stage: Document): AggregationCursor; addStage(stage: Document): AggregationCursor { this.throwIfInitialized(); + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (stage.$out != null || stage.$merge != null) + ) { + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); + } this.pipeline.push(stage); return this as unknown as AggregationCursor; } diff --git a/src/cursor/change_stream_cursor.ts b/src/cursor/change_stream_cursor.ts index b42ce3e1302..13f58675552 100644 --- a/src/cursor/change_stream_cursor.ts +++ b/src/cursor/change_stream_cursor.ts @@ -133,7 +133,11 @@ export class ChangeStreamCursor< session }); - const response = await executeOperation(session.client, aggregateOperation); + const response = await executeOperation( + session.client, + aggregateOperation, + this.timeoutContext + ); const server = aggregateOperation.server; this.maxWireVersion = maxWireVersion(server); diff --git a/src/cursor/find_cursor.ts b/src/cursor/find_cursor.ts index 83a12818bd0..96b764dc7ff 100644 --- a/src/cursor/find_cursor.ts +++ b/src/cursor/find_cursor.ts @@ -69,7 +69,7 @@ export class FindCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, findOperation); + const response = await executeOperation(this.client, findOperation, this.timeoutContext); // the response is not a cursor when `explain` is enabled this.numReturned = response.batchSize; diff --git a/src/cursor/list_collections_cursor.ts b/src/cursor/list_collections_cursor.ts index a529709556d..9b69de1b935 100644 --- a/src/cursor/list_collections_cursor.ts +++ b/src/cursor/list_collections_cursor.ts @@ -41,7 +41,7 @@ export class ListCollectionsCursor< session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/list_indexes_cursor.ts b/src/cursor/list_indexes_cursor.ts index 799ddf5bdb5..0f768f3b699 100644 --- a/src/cursor/list_indexes_cursor.ts +++ b/src/cursor/list_indexes_cursor.ts @@ -30,7 +30,7 @@ export class ListIndexesCursor extends AbstractCursor { session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 78b9826b9b1..6b31ce2263a 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -9,12 +9,20 @@ import type { ReadConcernLike } from '../read_concern'; import type { ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; import { ns } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { + AbstractCursor, + type CursorTimeoutMode, + type InitialCursorResponse +} from './abstract_cursor'; /** @public */ export type RunCursorCommandOptions = { readPreference?: ReadPreferenceLike; session?: ClientSession; + /** @internal */ + timeoutMS?: number; + /** @internal */ + timeoutMode?: CursorTimeoutMode; } & BSONSerializeOptions; /** 
@public */ @@ -105,7 +113,7 @@ export class RunCommandCursor extends AbstractCursor { responseType: CursorResponse }); - const response = await executeOperation(this.client, operation); + const response = await executeOperation(this.client, operation, this.timeoutContext); return { server: operation.server, @@ -123,6 +131,6 @@ export class RunCommandCursor extends AbstractCursor { ...this.getMoreOptions }); - return await executeOperation(this.client, getMoreOperation); + return await executeOperation(this.client, getMoreOperation, this.timeoutContext); } } diff --git a/src/index.ts b/src/index.ts index 693fcf03493..e555d97e9ed 100644 --- a/src/index.ts +++ b/src/index.ts @@ -109,7 +109,7 @@ export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypt export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi'; export { AuthMechanism } from './cmap/auth/providers'; export { Compressor } from './cmap/wire_protocol/compression'; -export { CURSOR_FLAGS } from './cursor/abstract_cursor'; +export { CURSOR_FLAGS, type CursorTimeoutMode } from './cursor/abstract_cursor'; export { MongoErrorLabel } from './error'; export { ExplainVerbosity } from './explain'; export { ServerApiVersion } from './mongo_client'; diff --git a/src/operations/aggregate.ts b/src/operations/aggregate.ts index 50494cbba73..096fe372715 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -36,6 +37,9 @@ export interface AggregateOptions extends CommandOperationOptions { let?: Document; out?: string; + + /** @internal */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 2523058ecfd..f59df27569f 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -288,8 +288,7 @@ async function tryOperation< previousOperationError = operationError; // Reset timeouts - timeoutContext.serverSelectionTimeout?.clear(); - timeoutContext.connectionCheckoutTimeout?.clear(); + timeoutContext.clear(); } } diff --git a/src/operations/find.ts b/src/operations/find.ts index 5f359324d56..c39695cc0bc 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; @@ -64,6 +65,9 @@ export interface FindOptions * @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored. 
*/ oplogReplay?: boolean; + + /** @internal*/ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index c96a5d73453..220d438d834 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -1,7 +1,7 @@ import type { Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Collection } from '../collection'; -import { type AbstractCursorOptions } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; @@ -360,7 +360,12 @@ export class DropIndexOperation extends CommandOperation { } /** @public */ -export type ListIndexesOptions = AbstractCursorOptions; +export type ListIndexesOptions = AbstractCursorOptions & { + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; + /** @internal */ + omitMaxTimeMS?: boolean; +}; /** @internal */ export class ListIndexesOperation extends CommandOperation { diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index 702db0fe3f2..50df243a3ff 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -1,5 +1,6 @@ import type { Binary, Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -16,6 +17,8 @@ export interface ListCollectionsOptions extends Omit { public readonly start: number; public ended: number | null = null; public duration: number; - public timedOut = false; + private timedOut = false; public cleared = false; get remainingTime(): number { @@ -100,6 +100,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.timedOut = false; this.cleared = true; } @@ -190,6 +191,10 @@ export abstract class TimeoutContext { abstract get timeoutForSocketRead(): Timeout | null; abstract csotEnabled(): this is CSOTTimeoutContext; + + abstract refresh(): void; + + abstract clear(): void; } /** @internal */ @@ -288,6 +293,18 @@ export class CSOTTimeoutContext extends TimeoutContext { if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } + + refresh(): void { + this.start = Math.trunc(performance.now()); + this.minRoundTripTime = 0; + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } + + clear(): void { + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } } /** @internal */ @@ -326,4 +343,12 @@ export class LegacyTimeoutContext extends TimeoutContext { get timeoutForSocketRead(): Timeout | null { return null; } + + refresh(): void { + return; + } + + clear(): void { + return; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 406aa53ed6a..0d36998fd96 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ 
b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -4,7 +4,9 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { type CommandStartedEvent } from '../../../mongodb'; import { + type CommandSucceededEvent, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -216,12 +218,52 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('5. Blocking Iteration Methods', () => { + context('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an * error occurs. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 20 + } + }; + let internalClient: MongoClient; + let client: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient.db('db').dropCollection('coll'); + // Creating capped collection to be able to create tailable find cursor + const coll = await internalClient + .db('db') + .createCollection('coll', { capped: true, size: 1_000_000 }); + await coll.insertOne({ x: 1 }); + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); context('Tailable cursors', () => { /** @@ -248,6 +290,29 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ + + it.skip('send correct number of finds and getMores', async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { tailable: true, awaitData: true }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + // Check that there are no getMores sent + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(0); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + // Expect 1 find + expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); + // Expect 2 getMore + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6305)'; }); context('Change Streams', () => { @@ -272,6 +337,23 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ + it.skip('sends correct number of aggregate and getMores', async function () { + const changeStream = client.db('db').collection('coll').watch(); + const maybeError = await changeStream.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + const aggregates = commandStarted + .filter(e => e.command.aggregate != null) + .map(e => e.command); + const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); + // Expect 1 aggregate + expect(aggregates).to.have.lengthOf(1); + // Expect 1 getMore + expect(getMores).to.have.lengthOf(1); + }).skipReason = 'TODO(NODE-6305)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index a178cecc5d2..99914fa08e7 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -4,49 +4,55 @@ import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -const enabled = [ - 'override-collection-timeoutMS', - 'override-database-timeoutMS', - 'override-operation-timeoutMS', - 'retryability-legacy-timeouts', - 'retryability-timeoutMS', - 'sessions-override-operation-timeoutMS', - 'sessions-override-timeoutMS', - 'sessions-inherit-timeoutMS' -]; +const skippedSpecs = { + bulkWrite: 'TODO(NODE-6274)', + 'change-streams': 'TODO(NODE-6035)', + 'convenient-transactions': 'TODO(NODE-5687)', + 'deprecated-options': 'TODO(NODE-5689)', + 'gridfs-advanced': 'TODO(NODE-6275)', + 'gridfs-delete': 'TODO(NODE-6275)', + 'gridfs-download': 'TODO(NODE-6275)', + 'gridfs-find': 'TODO(NODE-6275)', + 'gridfs-upload': 'TODO(NODE-6275)', + 'tailable-awaitData': 'TODO(NODE-6035)', + 'tailable-non-awaitData': 'TODO(NODE-6035)' +}; -const cursorOperations = [ - 'aggregate', - 'countDocuments', - 'listIndexes', - 'createChangeStream', - 'listCollections', - 'listCollectionNames' -]; - -const bulkWriteOperations = [ - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' -]; +const skippedTests = { + 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': + 'TODO(NODE-6305)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': + 'TODO(NODE-6274)', + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': + 'TODO(NODE-6274)', + 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', + 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': + 'TODO(DRIVERS-2965)', + 'Non-tailable cursor lifetime remaining timeoutMS applied to 
getMore if timeoutMode is unset': + 'TODO(DRIVERS-2965)', + 'maxTimeMS value in the command is less than timeoutMS': + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' +}; describe('CSOT spec tests', function () { - const specs = loadSpecTests(join('client-side-operations-timeout')); + const specs = loadSpecTests('client-side-operations-timeout'); for (const spec of specs) { for (const test of spec.tests) { - // not one of the test suites listed in kickoff - if (!enabled.includes(spec.name)) { - test.skipReason = 'TODO(NODE-5684): Not working yet'; + if (skippedSpecs[spec.name] != null) { + test.skipReason = skippedSpecs[spec.name]; + } + if (skippedTests[test.description] != null) { + test.skipReason = skippedTests[test.description]; } - - // Cursor operation - if (test.operations.find(operation => cursorOperations.includes(operation.name))) - test.skipReason = 'TODO(NODE-5684): Not working yet'; - - if (bulkWriteOperations.includes(test.description)) - test.skipReason = - 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } + runUnifiedSuite(specs, (test, configuration) => { const sessionCSOTTests = ['timeoutMS applied to withTransaction']; if ( @@ -59,3 +65,10 @@ describe('CSOT spec tests', function () { return false; }); }); + +describe('CSOT modified spec tests', function () { + const specs = loadSpecTests( + join('..', 'integration', 'client-side-operations-timeout', 'unified-csot-node-specs') + ); + runUnifiedSuite(specs); +}); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index cc767c1d80a..f5ada7eef9f 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,6 @@ /* Anything javascript specific relating to timeouts */ +import { setTimeout } from 'node:timers/promises'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -7,6 +9,9 @@ import { BSON, type ClientSession, type Collection, + type CommandFailedEvent, + type CommandStartedEvent, + type CommandSucceededEvent, Connection, type Db, type FindCursor, @@ -18,7 +23,9 @@ import { } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { +const metadata = { requires: { mongodb: '>=4.4' } }; + +describe('CSOT driver tests', metadata, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -171,8 +178,8 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('server-side maxTimeMS errors are transformed', () => { let client: MongoClient; - let commandsSucceeded; - let commandsFailed; + let commandsSucceeded: CommandSucceededEvent[]; + let commandsFailed: CommandFailedEvent[]; beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); @@ -221,18 +228,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command failed', async () => { - const error = await client - .db() - .command({ ping: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); 
- expect(error.cause).to.have.property('code', 50); - - expect(commandsFailed).to.have.lengthOf(1); - expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command failed', + metadata, + async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { @@ -267,18 +278,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { afterEach(() => sinon.restore()); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client - .db('admin') - .command({ giveMeWriteErrors: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { @@ -306,22 +321,266 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + } + ); + }); + }); + + describe('Non-Tailable cursors', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await 
internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('ITERATION mode', () => { + context('when executing an operation', () => { + it( + 'must apply the configured timeoutMS to the initial operation execution', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 3, timeoutMode: 'iteration', timeoutMS: 10 }) + .limit(3); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + + it('refreshes the timeout for any getMores', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + + // Iterating over 3 documents in the collection, each artificially taking ~50 ms due to failpoint. If timeoutMS is not refreshed, then we'd expect to error + for await (const doc of cursor) { + expect(doc).to.deep.equal({ x: 1 }); + } + + const finds = commandSucceeded.filter(ev => ev.commandName === 'find'); + const getMores = commandSucceeded.filter(ev => ev.commandName === 'getMore'); + + expect(finds).to.have.length(1); // Expecting 1 find + expect(getMores).to.have.length(3); // Expecting 3 getMores (including final empty getMore) + }); + + it( + 'does not append a maxTimeMS to the original command or getMores', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + await cursor.toArray(); + + expect(commandStarted).to.have.length.gte(3); // Find and 2 getMores + expect( + commandStarted.filter(ev => { + return ( + ev.command.find != null && + ev.command.getMore != null && + ev.command.maxTimeMS != null + ); + }) + ).to.have.lengthOf(0); + } + ); + }); + }); + + context('LIFETIME mode', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient .db() - 
.collection('a') - .insertOne({}) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeConcernError.code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + context('when executing a next call', () => { + context( + 'when there are documents available from previously retrieved batch and timeout has expired', + () => { + it('returns documents without error', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.be.gt(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.not.be.instanceOf(MongoOperationTimeoutError); + expect(docOrErr).to.be.deep.equal({ x: 1 }); + }); + } + ); + context('when a getMore is required and the timeout has expired', () => { + it('throws a MongoOperationTimeoutError', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + + .project({ _id: 0 }); + + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.equal(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + it('does not apply maxTimeMS to a getMore', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 1000 }) + .project({ _id: 0 }); + + for await (const _doc of cursor) { + // Ignore _doc + } + + const getMores = commandStarted + .filter(ev => ev.command.getMore != null) + .map(ev => ev.command); + expect(getMores.length).to.be.gt(0); + + for (const getMore of getMores) { + expect(getMore.maxTimeMS).to.not.exist; + } + }); }); }); }); + describe.skip('Tailable non-awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe.skip('Tailable awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json new file mode 100644 index 00000000000..dd6fcb2cf84 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json @@ -0,0 +1,153 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + 
"initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1500 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 500 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/client-side-operations-timeout/README.md b/test/spec/client-side-operations-timeout/README.md new file mode 100644 index 00000000000..a960c2de219 --- /dev/null +++ b/test/spec/client-side-operations-timeout/README.md @@ -0,0 +1,661 @@ +# Client Side Operations Timeouts Tests + +______________________________________________________________________ + +## Introduction + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +## Spec Tests + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of +them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute these +tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another +with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and +`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters. + +## Prose Tests + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). 
Any fail points set during a +test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests +MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to +listen for `command_started` events. + +### 1. Multi-batch inserts + +This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an +exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow +for differing bulk encoding performance. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call. + + - Expect this to fail with a timeout error. + +5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. + +### 2. maxTimeMS is not set for commands sent to mongocryptd + +This test MUST only be run against enterprise server versions 4.2 and higher. + +1. Launch a mongocryptd process on 23000. +2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. +3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. +4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. + +### 3. ClientEncryption + +Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY` +refers to the following base64: + +```javascript +Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk +``` + +For each test, perform the following setup: + +1. Using `internalClient`, drop and create the `keyvault.datakeys` collection. + +2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`. + +3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). Configure this + object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map: + + ```javascript + { + "local": { "key": } + } + ``` + +#### createDataKey + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to fail with a timeout error. + +3. Verify that an `insert` command was executed against to `keyvault.datakeys` as part of the `createDataKey` call. + +#### encrypt + +1. Call `client_encryption.createDataKey()` with the `local` KMS provider. + + - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. 
Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`. + + - Expect this to fail with a timeout error. + +4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. + +#### decrypt + +1. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`. + +2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`. + + - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`. + +3. Close and re-create the `keyVaultClient` and `clientEncryption` objects. + +4. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +5. Call `clientEncryption.decrypt()` with the value `encrypted`. + + - Expect this to fail with a timeout error. + +6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. + +### 4. Background Connection Pooling + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields +(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for +some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are +not published within that time. + +#### timeoutMS used for handshake commands + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 10 + - `appName` of `timeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published. + +#### timeoutMS is refreshed for each handshake command + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 20 + - `appName` of `refreshTimeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published. + +### 5. Blocking Iteration Methods + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an +error occurs. + +#### Tailable cursors + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`. + +3. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`. + + - Expect this to succeed and return a cursor with a non-zero ID. + +6. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command. + +7. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the + test. + +#### Change Streams + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +4. Using `client`, use the `watch` helper to create a change stream against `db.coll`. + + - Expect this to succeed and return a change stream with a non-zero ID. + +5. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during + the test. + +### 6. GridFS - Upload + +Tests in this section MUST only be run against server versions 4.4 and higher. + +#### uploads via openUploadStream can be timed out + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload a single `0x12` byte. + +7. Call `uploadStream.close()` to flush the stream and insert chunks. + + - Expect this to fail with a timeout error. + +#### Aborting an upload stream can be timed out + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with + `chunkSizeBytes=2`. + +5. 
Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`. + +7. Call `uploadStream.abort()`. + + - Expect this to fail with a timeout error. + +### 7. GridFS - Download + +This test MUST only be run against server versions 4.4 and higher. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, insert the following document into the `db.fs.files` collection: + + ```javascript + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream + (referred to as `downloadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +7. Read from the `downloadStream`. + + - Expect this to fail with a timeout error. + +8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against + `db.fs.chunks`. + +### 8. Server Selection + +#### serverSelectionTimeoutMS honored if timeoutMS is not set + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. +2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0 + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. 
a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +### 9. endSession + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run +three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via +the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the +`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient + (referred to as `session`). + +4. Execute the following code: + + ```typescript + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + ``` + +5. Using `session`, execute `session.end_session` + + - Expect this to fail with a timeout error after no more than 15ms. + +### 10. Convenient Transactions + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +#### timeoutMS is refreshed for abortTransaction if the callback fails + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession + derived from that MongoClient (referred to as `session`). + +4. Using `session`, execute a `withTransaction` operation with the following callback: + + ```typescript + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + ``` + +5. Expect the previous `withTransaction` call to fail with a timeout error. + +6. 
Verify that the following events were published during the `withTransaction` call: + + 1. `command_started` and `command_failed` events for an `insert` command. + 2. `command_started` and `command_failed` events for an `abortTransaction` command. + +### 11. Multi-batch bulkWrites + +This test MUST only run against server versions 8.0+. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["bulkWrite"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + in the response. + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +5. Create a list of write models (referred to as `models`) with the following write model repeated + (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + + ```json + InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + } + ``` + +6. Call `bulkWrite` on `client` with `models`. + + - Expect this to fail with a timeout error. + +7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. + +## Unit Tests + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set. +- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a + server has been selected. +- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server + has been selected. +- If a new connection is required to execute an operation, + `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment. +- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should + apply to HTTP requests against OCSP responders. +- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts. +- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment. 
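The unit-test bullets above all reduce to the same remaining-budget arithmetic. The following is a minimal TypeScript sketch of that arithmetic only, not driver code; the helper names (`remainingTimeMS`, `checkoutTimeoutMS`, `socketEstablishmentTimeoutMS`) are illustrative assumptions and do not exist in any driver API.

```typescript
// Sketch of the timeout selection rules described by the unit-test bullets above.
// All names here are illustrative; none of them are driver APIs.

interface TimeoutContext {
  timeoutMS?: number;          // operation-level timeout, if set
  waitQueueTimeoutMS: number;  // legacy connection checkout timeout
  connectTimeoutMS: number;
  start: number;               // when the operation started, in ms
}

// Remaining budget for the overall operation, or Infinity when timeoutMS is unset or zero.
function remainingTimeMS(ctx: TimeoutContext, now: number): number {
  if (ctx.timeoutMS == null || ctx.timeoutMS === 0) return Infinity;
  return Math.max(ctx.timeoutMS - (now - ctx.start), 0);
}

// Connection checkout: the remaining timeoutMS applies when timeoutMS is set;
// otherwise waitQueueTimeoutMS applies (and waitQueueTimeoutMS is ignored when timeoutMS is set).
function checkoutTimeoutMS(ctx: TimeoutContext, now: number): number {
  const remaining = remainingTimeMS(ctx, now);
  return remaining === Infinity ? ctx.waitQueueTimeoutMS : remaining;
}

// Socket establishment: min(remaining computedServerSelectionTimeout, connectTimeoutMS).
function socketEstablishmentTimeoutMS(remainingServerSelectionMS: number, connectTimeoutMS: number): number {
  return Math.min(remainingServerSelectionMS, connectTimeoutMS);
}

// Example: timeoutMS=200 with 120ms already elapsed leaves 80ms for checkout; with 80ms of the
// server selection budget remaining and connectTimeoutMS=10000, the socket also gets 80ms.
const ctx: TimeoutContext = { timeoutMS: 200, waitQueueTimeoutMS: 1000, connectTimeoutMS: 10_000, start: 0 };
console.log(checkoutTimeoutMS(ctx, 120));                             // 80
console.log(socketEstablishmentTimeoutMS(80, ctx.connectTimeoutMS));  // 80
```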
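For comparison, the prose tests above translate fairly directly into driver test code. Below is a rough sketch of prose test 1 (multi-batch inserts) driven from the Node.js driver. It is a sketch under stated assumptions only: a standalone at `mongodb://localhost:27017`, a driver build that accepts the `timeoutMS` client option, and no assertion on the exact timeout error class or on command monitoring.

```typescript
import { MongoClient } from 'mongodb';

async function multiBatchInsertProseTest(): Promise<void> {
  // internalClient: used only for setup and fail points, no timeoutMS configured.
  const internalClient = new MongoClient('mongodb://localhost:27017');
  await internalClient.db('db').collection('coll').drop().catch(() => null);
  await internalClient.db('admin').command({
    configureFailPoint: 'failCommand',
    mode: { times: 2 },
    data: { failCommands: ['insert'], blockConnection: true, blockTimeMS: 1010 }
  });

  // Client under test: timeoutMS=2000 as required by the prose test (needs a CSOT-capable driver).
  const client = new MongoClient('mongodb://localhost:27017', { timeoutMS: 2000 });
  try {
    // 50 one-megabyte documents forces the insertMany to be split into multiple batches.
    const docs = Array.from({ length: 50 }, () => ({ a: 'b'.repeat(1024 * 1024) }));
    const error = await client
      .db('db')
      .collection('coll')
      .insertMany(docs)
      .then(() => null, (e: Error) => e);
    if (error == null) throw new Error('expected insertMany to time out');
    // A command monitor would additionally assert that exactly two insert commands were sent.
  } finally {
    // Per the setup requirements, unset the fail point using internalClient after the test.
    await internalClient.db('admin').command({ configureFailPoint: 'failCommand', mode: 'off' });
    await Promise.all([client.close(), internalClient.close()]);
  }
}
```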
diff --git a/test/spec/client-side-operations-timeout/change-streams.json b/test/spec/client-side-operations-timeout/change-streams.json index aef77bb452d..8cffb08e267 100644 --- a/test/spec/client-side-operations-timeout/change-streams.json +++ b/test/spec/client-side-operations-timeout/change-streams.json @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/spec/client-side-operations-timeout/change-streams.yml b/test/spec/client-side-operations-timeout/change-streams.yml index b2a052d01b2..c813be035ac 100644 --- a/test/spec/client-side-operations-timeout/change-streams.yml +++ b/test/spec/client-side-operations-timeout/change-streams.yml @@ -67,12 +67,12 @@ tests: data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 55 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 50 + timeoutMS: 200 expectError: isTimeoutError: true expectEvents: @@ -142,12 +142,12 @@ tests: data: failCommands: ["aggregate", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 maxAwaitTimeMS: 1 saveResultAsEntity: &changeStream changeStream @@ -171,16 +171,16 @@ tests: maxTimeMS: 1 # The timeout should be applied to the entire resume attempt, not individually to each command. The test creates a - # change stream with timeoutMS=20 which returns an empty initial batch and then sets a fail point to block both - # getMore and aggregate for 12ms each and fail with a resumable error. When the resume attempt happens, the getMore - # and aggregate block for longer than 20ms total, so it times out. + # change stream with timeoutMS=200 which returns an empty initial batch and then sets a fail point to block both + # getMore and aggregate for 120ms each and fail with a resumable error. When the resume attempt happens, the getMore + # and aggregate block for longer than 200ms total, so it times out. 
- description: "timeoutMS applies to full resume attempt in a next call" operations: - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - name: failPoint object: testRunner @@ -192,7 +192,7 @@ tests: data: failCommands: ["getMore", "aggregate"] blockConnection: true - blockTimeMS: 12 + blockTimeMS: 120 errorCode: 7 # HostNotFound - resumable but does not require an SDAM state change. # failCommand doesn't correctly add the ResumableChangeStreamError by default. It needs to be specified # manually here so the error is considered resumable. The failGetMoreAfterCursorCheckout fail point @@ -234,9 +234,9 @@ tests: # Specify a short maxAwaitTimeMS because otherwise the getMore on the new cursor will wait for 1000ms and # time out. maxAwaitTimeMS: 1 - timeoutMS: 100 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - # Block getMore for 150ms to force the next() call to time out. + # Block getMore for 250ms to force the next() call to time out. - name: failPoint object: testRunner arguments: @@ -247,7 +247,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 150 + blockTimeMS: 250 # The original aggregate didn't return any events so this should do a getMore and return a timeout error. - name: iterateUntilDocumentOrError object: *changeStream @@ -290,7 +290,7 @@ tests: collection: *collectionName # The timeoutMS value should be refreshed for getMore's. This is a failure test. The createChangeStream operation - # sets timeoutMS=10 and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # sets timeoutMS=200 and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -303,12 +303,12 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 10 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream # The first iteration should do a getMore - name: iterateUntilDocumentOrError diff --git a/test/spec/client-side-operations-timeout/close-cursors.json b/test/spec/client-side-operations-timeout/close-cursors.json index 1361971c4ce..79b0de7b6aa 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.json +++ b/test/spec/client-side-operations-timeout/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/spec/client-side-operations-timeout/close-cursors.yml b/test/spec/client-side-operations-timeout/close-cursors.yml index db26e79ca31..c4c4ea0acda 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.yml +++ 
b/test/spec/client-side-operations-timeout/close-cursors.yml @@ -46,13 +46,13 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 50 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor # Iterate the cursor three times. The third should do a getMore, which should fail with a timeout error. - name: iterateUntilDocumentOrError @@ -99,18 +99,18 @@ tests: data: failCommands: ["killCursors"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor - name: close object: *cursor arguments: - timeoutMS: 40 + timeoutMS: 400 expectEvents: - client: *client events: @@ -120,7 +120,7 @@ tests: commandName: find - commandStartedEvent: command: - killCursors: *collection + killCursors: *collectionName maxTimeMS: { $$type: ["int", "long"] } commandName: killCursors - commandSucceededEvent: diff --git a/test/spec/client-side-operations-timeout/command-execution.json b/test/spec/client-side-operations-timeout/command-execution.json index b9b306c7fb6..aa9c3eb23f3 100644 --- a/test/spec/client-side-operations-timeout/command-execution.json +++ b/test/spec/client-side-operations-timeout/command-execution.json @@ -3,7 +3,7 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "topologies": [ "single", "replicaset", diff --git a/test/spec/client-side-operations-timeout/command-execution.yml b/test/spec/client-side-operations-timeout/command-execution.yml index 400a90867a3..6ba0585b3ca 100644 --- a/test/spec/client-side-operations-timeout/command-execution.yml +++ b/test/spec/client-side-operations-timeout/command-execution.yml @@ -3,9 +3,8 @@ description: "timeoutMS behaves correctly during command execution" schemaVersion: "1.9" runOnRequirements: - # The appName filter cannot be used to set a fail point on connection handshakes until server version 4.9 due to - # SERVER-49220/SERVER-49336. - - minServerVersion: "4.9" + # Require SERVER-49336 for failCommand + appName on the initial handshake. + - minServerVersion: "4.4.7" # Skip load-balanced and serverless which do not support RTT measurements. 
topologies: [ single, replicaset, sharded ] serverless: forbid diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.json b/test/spec/client-side-operations-timeout/convenient-transactions.json index 07e676d5f51..3868b3026c2 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.json +++ b/test/spec/client-side-operations-timeout/convenient-transactions.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.yml b/test/spec/client-side-operations-timeout/convenient-transactions.yml index d79aa4bd058..02d48b83242 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.yml +++ b/test/spec/client-side-operations-timeout/convenient-transactions.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -49,6 +49,8 @@ tests: timeoutMS: 100 expectError: isClientError: true + expectError: + isClientError: true expectEvents: # The only operation run fails with a client-side error, so there should be no events for the client. - client: *client @@ -66,7 +68,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 300 - name: withTransaction object: *session arguments: @@ -88,9 +90,6 @@ tests: expectEvents: - client: *client events: - # Because the second insert expects an error and gets an error, it technically succeeds, so withTransaction - # will try to run commitTransaction. This will fail client-side, though, because the timeout has already - # expired, so no command is sent. 
- commandStartedEvent: commandName: insert databaseName: *databaseName @@ -103,3 +102,9 @@ tests: command: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } diff --git a/test/spec/client-side-operations-timeout/deprecated-options.json b/test/spec/client-side-operations-timeout/deprecated-options.json index 322e9449101..d3e4631ff43 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.json +++ b/test/spec/client-side-operations-timeout/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/spec/client-side-operations-timeout/deprecated-options.yml b/test/spec/client-side-operations-timeout/deprecated-options.yml index 461ba6ab139..582a8983ae2 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.yml +++ b/test/spec/client-side-operations-timeout/deprecated-options.yml @@ -1,4 +1,4 @@ -description: "operations ignore deprected timeout options if timeoutMS is set" +description: "operations ignore deprecated timeout options if timeoutMS is set" schemaVersion: "1.9" diff --git a/test/spec/client-side-operations-timeout/gridfs-advanced.yml b/test/spec/client-side-operations-timeout/gridfs-advanced.yml index bc788bacc35..f6c37e165b2 100644 --- a/test/spec/client-side-operations-timeout/gridfs-advanced.yml +++ b/test/spec/client-side-operations-timeout/gridfs-advanced.yml @@ -119,7 +119,7 @@ tests: update: *filesCollectionName maxTimeMS: { $$type: ["int", "long"] } - # Tests for the "drop" opration. Any tests that might result in multiple commands being sent do not have expectEvents + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents # assertions as these assertions reduce test robustness and can cause flaky failures. 
- description: "timeoutMS can be overridden for drop" diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.json b/test/spec/client-side-operations-timeout/non-tailable-cursors.json index 0a5448a6bb2..291c6e72aa1 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.json +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml index 8cd953dec45..29037b4c0a3 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -53,7 +53,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -86,14 +86,14 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 - # Run a find with timeoutMS=20 and batchSize=1 to force two batches, which will cause a find and a getMore to be - # sent. Both will block for 15ms so together they will go over the timeout. + blockTimeMS: 125 + # Run a find with timeoutMS=200 and batchSize=1 to force two batches, which will cause a find and a getMore to be + # sent. Both will block for 125ms, so together they will go over the timeout. - name: find object: *collection arguments: filter: {} - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -127,13 +127,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: find object: *collection arguments: filter: {} timeoutMode: cursorLifetime - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -168,7 +168,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -187,8 +187,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. 
Neither - # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=20 and both - # "find" and "getMore" commands are blocked for 15ms each. Neither exceeds the timeout, so iteration succeeds. + # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=200 and both + # "find" and "getMore" commands are blocked for 125ms each. Neither exceeds the timeout, so iteration succeeds. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - success" operations: - name: failPoint @@ -201,13 +201,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 125 - name: find object: *collection arguments: filter: {} timeoutMode: iteration - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectEvents: - client: *client @@ -227,8 +227,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither - # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=10 and "getMore" - # commands are blocked for 15ms, causing iteration to fail with a timeout error. + # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=200 and "getMore" + # commands are blocked for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure" operations: - name: failPoint @@ -241,7 +241,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json index a28dbd26854..9daad260ef3 100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json @@ -108,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -198,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -327,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -419,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -546,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -634,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -760,6 +790,11 @@ }, { "description": "operation is retried multiple 
times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -851,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -982,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1075,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1203,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1291,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1417,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1508,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1639,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1732,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1868,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1964,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2095,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2183,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2303,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2390,6 +2495,11 @@ }, 
{ "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2512,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2600,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2730,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2825,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2955,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3043,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3166,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3254,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3377,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3465,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3588,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3676,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3799,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3887,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": 
"failPoint", @@ -4010,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4098,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4218,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4305,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4428,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4517,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4641,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4729,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4852,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4940,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5060,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5147,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5269,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5357,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml index 039f7ca42ef..6f47d6c2e42 
100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml @@ -84,6 +84,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -125,6 +127,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -191,6 +195,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -233,6 +239,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -299,6 +307,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -340,6 +350,8 @@ tests: delete: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -406,6 +418,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -448,6 +462,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -515,6 +531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -557,6 +575,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -623,6 +643,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner 
@@ -664,6 +686,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -730,6 +754,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -772,6 +798,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -839,6 +867,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -881,6 +911,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -949,6 +981,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -992,6 +1026,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1059,6 +1095,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1100,6 +1138,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1163,6 +1203,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1203,6 +1245,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1267,6 +1311,8 @@ tests: expectError: isTimeoutError: true - 
description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1308,6 +1354,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1373,6 +1421,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1414,6 +1464,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1479,6 +1531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1520,6 +1574,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1585,6 +1641,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1626,6 +1684,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1691,6 +1751,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1732,6 +1794,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1797,6 +1861,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1838,6 +1904,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1903,6 +1971,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1944,6 +2014,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2009,6 +2081,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2050,6 +2124,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2113,6 +2189,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2153,6 +2231,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2218,6 +2298,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2260,6 +2342,8 @@ tests: distinct: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2326,6 +2410,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2367,6 +2453,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2432,6 +2520,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: 
testRunner @@ -2473,6 +2563,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2536,6 +2628,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2576,6 +2670,8 @@ tests: listIndexes: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2640,6 +2736,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2681,6 +2779,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.json b/test/spec/client-side-operations-timeout/tailable-awaitData.json index 6da85c77835..535fb692434 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -130,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +188,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +199,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +272,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +283,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +354,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-awaitData.yml index 422c6fb5370..52b9b3b456c 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -83,7 +83,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 300 - name: find object: *collection arguments: @@ -117,13 +117,13 @@ tests: data: failCommands: ["find", "getMore"] 
blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate twice to force a getMore. The first iteration will return the document from the first batch and the @@ -165,13 +165,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 maxAwaitTimeMS: 1 saveResultAsEntity: &tailableCursor tailableCursor @@ -199,8 +199,8 @@ tests: collection: *collectionName maxTimeMS: 1 - # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from - # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=200 from + # the collection and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -213,7 +213,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json index 34ee6609636..e88230e4f7a 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml index 766b46e658b..eb75deaa65c 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -59,7 +59,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -96,13 +96,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailable - timeoutMS: 20 + timeoutMS: 200 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the @@ -131,7 +131,7 @@ tests: 
maxTimeMS: { $$exists: false } # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. The - # find inherits timeoutMS=10 from the collection and the getMore command blocks for 15ms, causing iteration to fail + # find inherits timeoutMS=200 from the collection and the getMore command blocks for 250ms, causing iteration to fail # with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: @@ -145,7 +145,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 5b5b7040698..31414fa4664 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -215,7 +215,8 @@ operations.set('close', async ({ entities, operation }) => { /* eslint-disable no-empty */ try { const cursor = entities.getEntity('cursor', operation.object); - await cursor.close(); + const timeoutMS = operation.arguments?.timeoutMS; + await cursor.close({ timeoutMS }); return; } catch {} @@ -787,7 +788,9 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc const { command, ...opts } = operation.arguments!; const cursor = db.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); diff --git a/test/unit/cursor/aggregation_cursor.test.ts b/test/unit/cursor/aggregation_cursor.test.ts index 32ca4125ff4..82ae18745b0 100644 --- a/test/unit/cursor/aggregation_cursor.test.ts +++ b/test/unit/cursor/aggregation_cursor.test.ts @@ -1,6 +1,12 @@ import { expect } from 'chai'; -import { type AggregationCursor, MongoClient } from '../../mongodb'; +import { + AggregationCursor, + CursorTimeoutMode, + MongoAPIError, + MongoClient, + ns +} from '../../mongodb'; describe('class AggregationCursor', () => { let client: MongoClient; @@ -126,6 +132,38 @@ describe('class AggregationCursor', () => { }); context('when addStage, bespoke stage methods, or array is used to construct pipeline', () => { + context('when CSOT is enabled', () => { + let aggregationCursor: AggregationCursor; + before(function () { + aggregationCursor = client + .db('test') + .collection('test') + .aggregate([], { timeoutMS: 100, timeoutMode: CursorTimeoutMode.ITERATION }); + }); + + context('when a $out stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $out: 'test' }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $merge stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $merge: {} }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $out stage is added with .out()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.out('test'); + }).to.throw(MongoAPIError); + }); + }); + }); + it('sets deeply identical aggregations pipelines', () => { const collection = client.db().collection('test'); @@ -157,4 +195,31 @@ describe('class AggregationCursor', () => { expect(builderGenericStageCursor.pipeline).to.deep.equal(expectedPipeline); }); }); + + 
describe('constructor()', () => { + context('when CSOT is enabled', () => { + context('when timeoutMode=ITERATION and a $out stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $out: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + context('when timeoutMode=ITERATION and a $merge stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $merge: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + }); + }); }); From f38197ba7c52bd0f578b900dd6340379a2808c84 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 12 Sep 2024 15:24:39 -0400 Subject: [PATCH 080/136] fix(NODE-6374): MongoOperationTimeoutError inherits MongoRuntimeError (#4237) --- etc/notes/errors.md | 6 +++++- src/error.ts | 21 ++++++++++++++++++--- test/unit/error.test.ts | 20 ++++++++++++++++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/etc/notes/errors.md b/etc/notes/errors.md index d0f8e6b6e95..114bc1b2e2c 100644 --- a/etc/notes/errors.md +++ b/etc/notes/errors.md @@ -67,7 +67,7 @@ Children of `MongoError` include: ### `MongoDriverError` This class represents errors which originate in the driver itself or when the user incorrectly uses the driver. This class should **never** be directly instantiated. -Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError) and [**`MongoRuntimeError`**](#MongoRuntimeError). +Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError), [**`MongoRuntimeError`**](#MongoRuntimeError) and [**`MongoOperationTimeoutError`**](#MongoOperationTimeoutError). ### `MongoAPIError` @@ -109,6 +109,10 @@ This class should **never** be directly instantiated. | **MongoGridFSChunkError** | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. | | **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. | +### `MongoOperationTimeoutError` + +- TODO(NODE-5688): Add MongoOperationTimeoutError documentation + ### MongoUnexpectedServerResponseError Intended for the scenario where the MongoDB returns an unexpected response in relation to some state the driver is in. diff --git a/src/error.ts b/src/error.ts index 2b973cccc38..3853b7e19c2 100644 --- a/src/error.ts +++ b/src/error.ts @@ -314,7 +314,7 @@ export class MongoAPIError extends MongoDriverError { /** * An error generated when the driver encounters unexpected input - * or reaches an unexpected/invalid internal state + * or reaches an unexpected/invalid internal state. * * @privateRemarks * Should **never** be directly instantiated. @@ -861,9 +861,24 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } /** - * @internal + * @public + * @category Error + * + * This error is thrown when an operation could not be completed within the specified `timeoutMS`. + * TODO(NODE-5688): expand this documentation. + * + * @example + * ```ts + * try { + * await blogs.insertOne(blogPost, { timeoutMS: 60_000 }) + * } catch (error) { + * if (error instanceof MongoOperationTimeoutError) { + * console.log(`Oh no! 
writer's block!`, error); + * } + * } + * ``` */ -export class MongoOperationTimeoutError extends MongoRuntimeError { +export class MongoOperationTimeoutError extends MongoDriverError { override get name(): string { return 'MongoOperationTimeoutError'; } diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index bdc049cbc4f..dca792bd382 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -14,12 +14,15 @@ import { LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE, LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE, MONGODB_ERROR_CODES, + MongoDriverError, MongoError, MongoErrorLabel, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, + MongoRuntimeError, MongoServerError, MongoSystemError, MongoWriteConcernError, @@ -173,6 +176,23 @@ describe('MongoErrors', () => { }); }); + describe('class MongoOperationTimeoutError', () => { + it('has a name property equal to MongoOperationTimeoutError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.have.property('name', 'MongoOperationTimeoutError'); + }); + + it('is instanceof MongoDriverError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.be.instanceOf(MongoDriverError); + }); + + it('is not instanceof MongoRuntimeError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.not.be.instanceOf(MongoRuntimeError); + }); + }); + describe('MongoMissingDependencyError#constructor', () => { context('when options.cause is set', () => { it('attaches the cause property to the instance', () => { From 607a53b4bfb23548d9cf196dee73c77e70e20e92 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 16:02:50 -0400 Subject: [PATCH 081/136] test: remove empty skipped context blocks (#4238) --- .../client-side-operations-timeout/node_csot.test.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index f5ada7eef9f..56127cc8ace 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -576,11 +576,6 @@ describe('CSOT driver tests', metadata, () => { }); }); - describe.skip('Tailable non-awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe.skip('Tailable awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } From 1ee3f6d1e90c21250fad13c8c9fc33424893b13e Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Tue, 17 Sep 2024 13:27:43 -0400 Subject: [PATCH 082/136] feat(NODE-5844): add iscryptd to ServerDescription (#4239) --- src/sdam/server_description.ts | 4 ++ .../server_description.test.ts | 56 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 test/integration/server-discovery-and-monitoring/server_description.test.ts diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index aadf523d722..a650c8dba97 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -75,6 +75,8 @@ export class ServerDescription { maxWriteBatchSize: number | null; /** The max bson object size. 
*/ maxBsonObjectSize: number | null; + /** Indicates server is a mongocryptd instance. */ + iscryptd: boolean; // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level $clusterTime?: ClusterTime; @@ -123,6 +125,7 @@ export class ServerDescription { this.primary = hello?.primary ?? null; this.me = hello?.me?.toLowerCase() ?? null; this.$clusterTime = hello?.$clusterTime ?? null; + this.iscryptd = Boolean(hello?.iscryptd); } get hostAddress(): HostAddress { @@ -176,6 +179,7 @@ export class ServerDescription { return ( other != null && + other.iscryptd === this.iscryptd && errorStrictEqual(this.error, other.error) && this.type === other.type && this.minWireVersion === other.minWireVersion && diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts new file mode 100644 index 00000000000..0a3c7eecbf6 --- /dev/null +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -0,0 +1,56 @@ +import { type ChildProcess, spawn } from 'node:child_process'; + +import { expect } from 'chai'; + +import { MongoClient } from '../../mongodb'; + +describe('class ServerDescription', function () { + describe('when connecting to mongocryptd', { requires: { mongodb: '>=4.4' } }, function () { + let client: MongoClient; + const mongocryptdTestPort = '27022'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}`); + }); + + afterEach(async function () { + await client?.close(); + childProcess.kill('SIGKILL'); + }); + + it('iscryptd is set to true ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.have.property('iscryptd', true); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', true); + }); + }); + + describe('when connecting to anything other than mongocryptd', function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient(); + }); + + afterEach(async function () { + await client?.close(); + }); + + it('iscryptd is set to false ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.not.have.property('iscryptd'); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', false); + }); + }); +}); From bf2b4acd82aff27d88f7488b39edb38d82429dcc Mon Sep 17 00:00:00 2001 From: Warren James Date: Wed, 25 Sep 2024 17:43:12 -0400 Subject: [PATCH 083/136] chore: allow clientBulkWrite to use TimeoutContext (#4251) --- .../client_bulk_write/client_bulk_write.ts | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/operations/client_bulk_write/client_bulk_write.ts b/src/operations/client_bulk_write/client_bulk_write.ts index e901407cd78..26d1e7bb60f 100644 --- a/src/operations/client_bulk_write/client_bulk_write.ts +++ b/src/operations/client_bulk_write/client_bulk_write.ts @@ -2,6 +2,7 @@ import { 
MongoClientBulkWriteExecutionError, ServerType } from '../../beta'; import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { MongoDBNamespace } from '../../utils'; import { CommandOperation } from '../command'; import { Aspect, defineAspects } from '../operation'; @@ -43,7 +44,8 @@ export class ClientBulkWriteOperation extends CommandOperation { let command; @@ -52,7 +54,7 @@ export class ClientBulkWriteOperation extends CommandOperation Date: Wed, 2 Oct 2024 17:31:59 -0400 Subject: [PATCH 084/136] feat(NODE-6274): add CSOT support to bulkWrite (#4250) Co-authored-by: Bailey Pearson --- src/bulk/common.ts | 18 ++- ...ient_side_operations_timeout.prose.test.ts | 142 +++++++++++++++++- ...lient_side_operations_timeout.spec.test.ts | 7 - test/tools/unified-spec-runner/match.ts | 14 +- 4 files changed, 159 insertions(+), 22 deletions(-) diff --git a/src/bulk/common.ts b/src/bulk/common.ts index dc0bcfb513f..22012207a09 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -501,7 +501,7 @@ export function mergeBatchResults( async function executeCommands( bulkOperation: BulkOperationBase, - options: BulkWriteOptions + options: BulkWriteOptions & { timeoutContext?: TimeoutContext | null } ): Promise { if (bulkOperation.s.batches.length === 0) { return new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered); @@ -552,7 +552,11 @@ async function executeCommands( let thrownError = null; let result; try { - result = await executeOperation(bulkOperation.s.collection.client, operation); + result = await executeOperation( + bulkOperation.s.collection.client, + operation, + finalOptions.timeoutContext + ); } catch (error) { thrownError = error; } @@ -866,7 +870,11 @@ export class BulkWriteShimOperation extends AbstractOperation { return 'bulkWrite' as const; } - async execute(_server: Server, session: ClientSession | undefined): Promise { + async execute( + _server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (this.options.session == null) { // An implicit session could have been created by 'executeOperation' // So if we stick it on finalOptions here, each bulk operation @@ -874,7 +882,7 @@ export class BulkWriteShimOperation extends AbstractOperation { // an explicit session would be this.options.session = session; } - return await executeCommands(this.bulkOperation, this.options); + return await executeCommands(this.bulkOperation, { ...this.options, timeoutContext }); } } @@ -1203,7 +1211,7 @@ export abstract class BulkOperationBase { const finalOptions = { ...this.s.options, ...options }; const operation = new BulkWriteShimOperation(this, finalOptions); - return await executeOperation(this.s.collection.client, operation); + return await executeOperation(this.s.collection.client, operation, finalOptions.timeoutContext); } /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 0d36998fd96..e276c9bbafd 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -7,6 +7,7 @@ import * as sinon from 'sinon'; import { type CommandStartedEvent } 
from '../../../mongodb'; import { type CommandSucceededEvent, + MongoBulkWriteError, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -28,7 +29,7 @@ describe('CSOT spec prose tests', function () { await client?.close(); }); - context.skip('1. Multi-batch writes', () => { + describe('1. Multi-batch writes', { requires: { topology: 'single', mongodb: '>=4.4' } }, () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -55,6 +56,46 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. */ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it('performs two inserts which fail to complete before 2000 ms', async () => { + const inserts = []; + client.on('commandStarted', ev => inserts.push(ev)); + + const a = new Uint8Array(1000000 - 22); + const oneMBDocs = Array.from({ length: 50 }, (_, _id) => ({ _id, a })); + const error = await client + .db('db') + .collection<{ _id: number; a: Uint8Array }>('coll') + .insertMany(oneMBDocs) + .catch(error => error); + + expect(error).to.be.instanceOf(MongoBulkWriteError); + expect(error.errorResponse).to.be.instanceOf(MongoOperationTimeoutError); + expect(inserts.map(ev => ev.commandName)).to.deep.equal(['insert', 'insert']); + }); }); context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { @@ -901,4 +942,103 @@ describe('CSOT spec prose tests', function () { }); }); }); + + describe.skip( + '11. Multi-batch bulkWrites', + { requires: { mongodb: '>=8.0', serverless: 'forbid' } }, + function () { + /** + * ### 11. Multi-batch bulkWrites + * + * This test MUST only run against server versions 8.0+. This test must be skipped on Atlas Serverless. + * + * 1. Using `internalClient`, drop the `db.coll` collection. + * + * 2. Using `internalClient`, set the following fail point: + * + * @example + * ```javascript + * { + * configureFailPoint: "failCommand", + * mode: { + * times: 2 + * }, + * data: { + * failCommands: ["bulkWrite"], + * blockConnection: true, + * blockTimeMS: 1010 + * } + * } + * ``` + * + * 3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + * in the response. + * + * 4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + * + * 5. Create a list of write models (referred to as `models`) with the following write model repeated + * (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + * + * @example + * ```json + * InsertOne { + * "namespace": "db.coll", + * "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + * } + * ``` + * + * 6. Call `bulkWrite` on `client` with `models`. + * + * - Expect this to fail with a timeout error. + * + * 7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. 
+ */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['bulkWrite'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + let maxBsonObjectSize: number; + let maxMessageSizeBytes: number; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + const hello = await internalClient.db('admin').command({ hello: 1 }); + maxBsonObjectSize = hello.maxBsonObjectSize; + maxMessageSizeBytes = hello.maxMessageSizeBytes; + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it.skip('performs two bulkWrites which fail to complete before 2000 ms', async function () { + const writes = []; + client.on('commandStarted', ev => writes.push(ev)); + + const length = maxMessageSizeBytes / maxBsonObjectSize + 1; + const models = Array.from({ length }, () => ({ + namespace: 'db.coll', + name: 'insertOne' as const, + document: { a: 'b'.repeat(maxBsonObjectSize - 500) } + })); + + const error = await client.bulkWrite(models).catch(error => error); + + expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError); + expect(writes.map(ev => ev.commandName)).to.deep.equal(['bulkWrite', 'bulkWrite']); + }).skipReason = 'TODO(NODE-6403): client.bulkWrite is implemented in a follow up'; + } + ); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 99914fa08e7..c2e08cfc80a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -5,7 +5,6 @@ import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const skippedSpecs = { - bulkWrite: 'TODO(NODE-6274)', 'change-streams': 'TODO(NODE-6035)', 'convenient-transactions': 'TODO(NODE-5687)', 'deprecated-options': 'TODO(NODE-5689)', @@ -19,18 +18,12 @@ const skippedSpecs = { }; const skippedTests = { - 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', - 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': 'TODO(NODE-6305)', 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': - 'TODO(NODE-6274)', - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': - 'TODO(NODE-6274)', 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': 'TODO(DRIVERS-2965)', diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 662746b4591..931ba1c9ecc 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -788,15 +788,11 @@ export 
function expectErrorCheck( if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { - expect(error).to.be.instanceof(MongoOperationTimeoutError); - } - - // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their - // errorResponse field - if (expected.isTimeoutError === false) { - expect(error).to.not.be.instanceof(MongoOperationTimeoutError); - } else if (expected.isTimeoutError === true) { - expect(error).to.be.instanceof(MongoOperationTimeoutError); + if ('errorResponse' in error) { + expect(error.errorResponse).to.be.instanceof(MongoOperationTimeoutError); + } else { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } } if (expected.errorContains != null) { From 3e2d32fba41b4278919f656614b9bced88aec82e Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 4 Oct 2024 10:52:21 -0400 Subject: [PATCH 085/136] feat(NODE-6275): Add CSOT support to GridFS (#4246) Co-authored-by: Neal Beeken Co-authored-by: Bailey Pearson --- package-lock.json | 9 +- package.json | 2 +- src/collection.ts | 10 +- src/gridfs/download.ts | 44 +++- src/gridfs/index.ts | 74 +++++-- src/gridfs/upload.ts | 191 ++++++++++++++---- src/operations/find.ts | 1 - src/timeout.ts | 12 ++ ...ient_side_operations_timeout.prose.test.ts | 171 +++++++++++++++- ...lient_side_operations_timeout.spec.test.ts | 5 - .../node_csot.test.ts | 167 ++++++++++++++- test/tools/unified-spec-runner/operations.ts | 37 +++- 12 files changed, 634 insertions(+), 89 deletions(-) diff --git a/package-lock.json b/package-lock.json index 1d9cebf509b..2b3a9b897aa 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.1", + "mongodb-legacy": "^6.1.2", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6440,10 +6440,11 @@ } }, "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.2.tgz", + "integrity": "sha512-oj+LLtvhhi8XuAQ8dll2BVjrnKxOo/7ylyQu0LsKmzyGcbrvzcyvFUOLC6rPhuJPOvnezh3MZ3/Sk9Tl1jpUpg==", "dev": true, + "license": "Apache-2.0", "dependencies": { "mongodb": "^6.0.0" }, diff --git a/package.json b/package.json index 2de0e1811f0..0c4c668726a 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.1", + "mongodb-legacy": "^6.1.2", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/collection.ts b/src/collection.ts index a73a5276f5f..62fa5bd4cba 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -501,12 +501,18 @@ export class Collection { */ async findOne(): Promise | null>; async findOne(filter: Filter): Promise | null>; - async findOne(filter: Filter, options: FindOptions): Promise | null>; + async findOne( + filter: Filter, + options: Omit + ): Promise | null>; // allow an override of the schema. 
async findOne(): Promise; async findOne(filter: Filter): Promise; - async findOne(filter: Filter, options?: FindOptions): Promise; + async findOne( + filter: Filter, + options?: Omit + ): Promise; async findOne( filter: Filter = {}, diff --git a/src/gridfs/download.ts b/src/gridfs/download.ts index 06dda0a92ba..19651b885ea 100644 --- a/src/gridfs/download.ts +++ b/src/gridfs/download.ts @@ -2,6 +2,7 @@ import { Readable } from 'stream'; import type { Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { FindCursor } from '../cursor/find_cursor'; import { MongoGridFSChunkError, @@ -12,6 +13,7 @@ import { import type { FindOptions } from '../operations/find'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; import type { Callback } from '../utils'; import type { GridFSChunk } from './upload'; @@ -28,7 +30,7 @@ export interface GridFSBucketReadStreamOptions { * to be returned by the stream. `end` is non-inclusive */ end?: number; - /** @internal TODO(NODE-5688): make this public */ + /** @public */ timeoutMS?: number; } @@ -98,8 +100,10 @@ export interface GridFSBucketReadStreamPrivate { skip?: number; start: number; end: number; + timeoutMS?: number; }; readPreference?: ReadPreference; + timeoutContext?: CSOTTimeoutContext; } /** @@ -148,7 +152,11 @@ export class GridFSBucketReadStream extends Readable { end: 0, ...options }, - readPreference + readPreference, + timeoutContext: + options?.timeoutMS != null + ? new CSOTTimeoutContext({ timeoutMS: options.timeoutMS, serverSelectionTimeoutMS: 0 }) + : undefined }; } @@ -196,7 +204,8 @@ export class GridFSBucketReadStream extends Readable { async abort(): Promise { this.push(null); this.destroy(); - await this.s.cursor?.close(); + const remainingTimeMS = this.s.timeoutContext?.getRemainingTimeMSOrThrow(); + await this.s.cursor?.close({ timeoutMS: remainingTimeMS }); } } @@ -352,7 +361,22 @@ function init(stream: GridFSBucketReadStream): void { filter['n'] = { $gte: skip }; } } - stream.s.cursor = stream.s.chunks.find(filter).sort({ n: 1 }); + + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + return stream.destroy(error); + } + + stream.s.cursor = stream.s.chunks + .find(filter, { + timeoutMode: stream.s.options.timeoutMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .sort({ n: 1 }); if (stream.s.readPreference) { stream.s.cursor.withReadPreference(stream.s.readPreference); @@ -371,6 +395,18 @@ function init(stream: GridFSBucketReadStream): void { return; }; + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + if (!stream.destroyed) stream.destroy(error); + return; + } + + findOneOptions.timeoutMS = remainingTimeMS; + stream.s.files.findOne(stream.s.filter, findOneOptions).then(handleReadResult, error => { if (stream.destroyed) return; stream.destroy(error); diff --git a/src/gridfs/index.ts b/src/gridfs/index.ts index 51c32b7a01c..de114e5e597 100644 --- a/src/gridfs/index.ts +++ b/src/gridfs/index.ts @@ -2,10 +2,12 @@ import type { ObjectId } from '../bson'; import type { Collection } from '../collection'; import type { FindCursor } from '../cursor/find_cursor'; import type { Db } from '../db'; -import { MongoRuntimeError } from '../error'; +import { MongoOperationTimeoutError, MongoRuntimeError } from '../error'; import { type Filter, TypedEventEmitter } from '../mongo_types'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; +import { resolveOptions } from '../utils'; import { WriteConcern, type WriteConcernOptions } from '../write_concern'; import type { FindOptions } from './../operations/find'; import { @@ -48,6 +50,7 @@ export interface GridFSBucketPrivate { chunkSizeBytes: number; readPreference?: ReadPreference; writeConcern: WriteConcern | undefined; + timeoutMS?: number; }; _chunksCollection: Collection; _filesCollection: Collection; @@ -81,11 +84,11 @@ export class GridFSBucket extends TypedEventEmitter { constructor(db: Db, options?: GridFSBucketOptions) { super(); this.setMaxListeners(0); - const privateOptions = { + const privateOptions = resolveOptions(db, { ...DEFAULT_GRIDFS_BUCKET_OPTIONS, ...options, writeConcern: WriteConcern.fromOptions(options) - }; + }); this.s = { db, options: privateOptions, @@ -109,7 +112,10 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, options); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options + }); } /** @@ -122,7 +128,11 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, { ...options, id }); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options, + id + }); } /** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. 
*/ @@ -135,7 +145,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { _id: id }, - options + { timeoutMS: this.s.options.timeoutMS, ...options } ); } @@ -144,11 +154,27 @@ export class GridFSBucket extends TypedEventEmitter { * * @param id - The id of the file doc */ - async delete(id: ObjectId): Promise { - const { deletedCount } = await this.s._filesCollection.deleteOne({ _id: id }); + async delete(id: ObjectId, options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.options.serverSelectionTimeoutMS + }); + } + const { deletedCount } = await this.s._filesCollection.deleteOne( + { _id: id }, + { timeoutMS: timeoutContext?.remainingTimeMS } + ); + + const remainingTimeMS = timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(`Timed out after ${timeoutMS}ms`); // Delete orphaned chunks before returning FileNotFound - await this.s._chunksCollection.deleteMany({ files_id: id }); + await this.s._chunksCollection.deleteMany({ files_id: id }, { timeoutMS: remainingTimeMS }); if (deletedCount === 0) { // TODO(NODE-3483): Replace with more appropriate error @@ -188,7 +214,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { filename }, - { ...options, sort, skip } + { timeoutMS: this.s.options.timeoutMS, ...options, sort, skip } ); } @@ -198,18 +224,36 @@ export class GridFSBucket extends TypedEventEmitter { * @param id - the id of the file to rename * @param filename - new name for the file */ - async rename(id: ObjectId, filename: string): Promise { + async rename(id: ObjectId, filename: string, options?: { timeoutMS: number }): Promise { const filter = { _id: id }; const update = { $set: { filename } }; - const { matchedCount } = await this.s._filesCollection.updateOne(filter, update); + const { matchedCount } = await this.s._filesCollection.updateOne(filter, update, options); if (matchedCount === 0) { throw new MongoRuntimeError(`File with id ${id} not found`); } } /** Removes this bucket's files collection, followed by its chunks collection. 
*/ - async drop(): Promise { - await this.s._filesCollection.drop(); - await this.s._chunksCollection.drop(); + async drop(options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.options.serverSelectionTimeoutMS + }); + } + + if (timeoutContext) { + await this.s._filesCollection.drop({ timeoutMS: timeoutContext.remainingTimeMS }); + const remainingTimeMS = timeoutContext.getRemainingTimeMSOrThrow( + `Timed out after ${timeoutMS}ms` + ); + await this.s._chunksCollection.drop({ timeoutMS: remainingTimeMS }); + } else { + await this.s._filesCollection.drop(); + await this.s._chunksCollection.drop(); + } } } diff --git a/src/gridfs/upload.ts b/src/gridfs/upload.ts index f54d5131f66..c7544b715d8 100644 --- a/src/gridfs/upload.ts +++ b/src/gridfs/upload.ts @@ -2,7 +2,14 @@ import { Writable } from 'stream'; import { type Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; -import { MongoAPIError, MONGODB_ERROR_CODES, MongoError } from '../error'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; +import { + MongoAPIError, + MONGODB_ERROR_CODES, + MongoError, + MongoOperationTimeoutError +} from '../error'; +import { CSOTTimeoutContext } from '../timeout'; import { type Callback, squashError } from '../utils'; import type { WriteConcernOptions } from '../write_concern'; import { WriteConcern } from './../write_concern'; @@ -35,7 +42,7 @@ export interface GridFSBucketWriteStreamOptions extends WriteConcernOptions { * @deprecated Will be removed in the next major version. Add an aliases field to the metadata document instead. 
*/ aliases?: string[]; - /** @internal TODO(NODE-5688): make this public */ + /** @public */ timeoutMS?: number; } @@ -97,6 +104,8 @@ export class GridFSBucketWriteStream extends Writable { * ``` */ gridFSFile: GridFSFile | null = null; + /** @internal */ + timeoutContext?: CSOTTimeoutContext; /** * @param bucket - Handle for this stream's corresponding bucket @@ -131,14 +140,11 @@ export class GridFSBucketWriteStream extends Writable { aborted: false }; - if (!this.bucket.s.calledOpenUploadStream) { - this.bucket.s.calledOpenUploadStream = true; - - checkIndexes(this).then(() => { - this.bucket.s.checkedIndexes = true; - this.bucket.emit('index'); - }, squashError); - } + if (options.timeoutMS != null) + this.timeoutContext = new CSOTTimeoutContext({ + timeoutMS: options.timeoutMS, + serverSelectionTimeoutMS: this.bucket.s.db.client.options.serverSelectionTimeoutMS + }); } /** @@ -147,10 +153,26 @@ export class GridFSBucketWriteStream extends Writable { * The stream is considered constructed when the indexes are done being created */ override _construct(callback: (error?: Error | null) => void): void { - if (this.bucket.s.checkedIndexes) { + if (!this.bucket.s.calledOpenUploadStream) { + this.bucket.s.calledOpenUploadStream = true; + + checkIndexes(this).then( + () => { + this.bucket.s.checkedIndexes = true; + this.bucket.emit('index'); + callback(); + }, + error => { + if (error instanceof MongoOperationTimeoutError) { + return handleError(this, error, callback); + } + squashError(error); + callback(); + } + ); + } else { return process.nextTick(callback); } - this.bucket.once('index', callback); } /** @@ -194,7 +216,10 @@ export class GridFSBucketWriteStream extends Writable { } this.state.aborted = true; - await this.chunks.deleteMany({ files_id: this.id }); + const remainingTimeMS = this.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${this.timeoutContext?.timeoutMS}ms` + ); + await this.chunks.deleteMany({ files_id: this.id, timeoutMS: remainingTimeMS }); } } @@ -219,9 +244,19 @@ function createChunkDoc(filesId: ObjectId, n: number, data: Buffer): GridFSChunk async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise { const index = { files_id: 1, n: 1 }; + let remainingTimeMS; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + let indexes; try { - indexes = await stream.chunks.listIndexes().toArray(); + indexes = await stream.chunks + .listIndexes({ + timeoutMode: remainingTimeMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -239,10 +274,14 @@ async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise }); if (!hasChunksIndex) { + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); await stream.chunks.createIndex(index, { ...stream.writeConcern, background: true, - unique: true + unique: true, + timeoutMS: remainingTimeMS }); } } @@ -270,13 +309,28 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { return; } - stream.files.insertOne(gridFSFile, { writeConcern: stream.writeConcern }).then( - () => { - stream.gridFSFile = gridFSFile; - callback(); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + + stream.files + .insertOne(gridFSFile, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + stream.gridFSFile = gridFSFile; + callback(); + }, + error => { + return handleError(stream, error, callback); + } + ); return; } @@ -284,7 +338,16 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { } async function checkIndexes(stream: GridFSBucketWriteStream): Promise { - const doc = await stream.files.findOne({}, { projection: { _id: 1 } }); + let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const doc = await stream.files.findOne( + {}, + { + projection: { _id: 1 }, + timeoutMS: remainingTimeMS + } + ); if (doc != null) { // If at least one document exists assume the collection has the required index return; @@ -293,8 +356,15 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { const index = { filename: 1, uploadDate: 1 }; let indexes; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const listIndexesOptions = { + timeoutMode: remainingTimeMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }; try { - indexes = await stream.files.listIndexes().toArray(); + indexes = await stream.files.listIndexes(listIndexesOptions).toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -312,7 +382,11 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { }); if (!hasFileIndex) { - await stream.files.createIndex(index, { background: false }); + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + + await stream.files.createIndex(index, { background: false, timeoutMS: remainingTimeMS }); } await checkChunksIndex(stream); @@ -386,6 +460,18 @@ function doWrite( let doc: GridFSChunk; if (spaceRemaining === 0) { doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore)); + + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; ++outstandingRequests; @@ -393,17 +479,21 @@ function doWrite( return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - --outstandingRequests; - - if (!outstandingRequests) { - checkDone(stream, callback); + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + --outstandingRequests; + + if (!outstandingRequests) { + checkDone(stream, callback); + } + }, + error => { + return handleError(stream, error, callback); } - }, - error => handleError(stream, error, callback) - ); + ); spaceRemaining = stream.chunkSizeBytes; stream.pos = 0; @@ -420,8 +510,6 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return checkDone(stream, callback); } - ++stream.state.outstandingRequests; - // Create a new buffer to make sure the buffer isn't bigger than it needs // to be. 
const remnant = Buffer.alloc(stream.pos); @@ -433,13 +521,28 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - checkDone(stream, callback); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + checkDone(stream, callback); + }, + error => { + return handleError(stream, error, callback); + } + ); } function isAborted(stream: GridFSBucketWriteStream, callback: Callback): boolean { diff --git a/src/operations/find.ts b/src/operations/find.ts index c39695cc0bc..641255553a0 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -66,7 +66,6 @@ export interface FindOptions */ oplogReplay?: boolean; - /** @internal*/ timeoutMode?: CursorTimeoutMode; } diff --git a/src/timeout.ts b/src/timeout.ts index f7fb3d0daa5..f694b5f4f4f 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -305,6 +305,18 @@ export class CSOTTimeoutContext extends TimeoutContext { this._serverSelectionTimeout?.clear(); this._connectionCheckoutTimeout?.clear(); } + + /** + * @internal + * Throws a MongoOperationTimeoutError if the context has expired. + * If the context has not expired, returns the `remainingTimeMS` + **/ + getRemainingTimeMSOrThrow(message?: string): number { + const { remainingTimeMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(message ?? `Expired after ${this.timeoutMS}ms`); + return remainingTimeMS; + } } /** @internal */ diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index e276c9bbafd..1b8c34633b4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -3,15 +3,20 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { Readable } from 'stream'; +import { pipeline } from 'stream/promises'; import { type CommandStartedEvent } from '../../../mongodb'; import { type CommandSucceededEvent, + GridFSBucket, MongoBulkWriteError, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, - now + now, + ObjectId, + promiseWithResolvers } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -398,10 +403,42 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('6. GridFS - Upload', () => { + context('6. 
GridFS - Upload', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + if (client) { + await client.close(); + } + }); /** Tests in this section MUST only be run against server versions 4.4 and higher. */ - context('uploads via openUploadStream can be timed out', () => { + it('uploads via openUploadStream can be timed out', metadata, async function () { /** * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. * 1. Using `internalClient`, set the following fail point: @@ -424,9 +461,30 @@ describe('CSOT spec prose tests', function () { * 1. Call `uploadStream.close()` to flush the stream and insert chunks. * - Expect this to fail with a timeout error. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const bucket = new GridFSBucket(client.db('db')); + const stream = bucket.openUploadStream('filename'); + const data = Buffer.from('13', 'hex'); + + const fileStream = Readable.from(data); + const maybeError = await pipeline(fileStream, stream).then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); }); - context('Aborting an upload stream can be timed out', () => { + it('Aborting an upload stream can be timed out', metadata, async function () { /** * This test only applies to drivers that provide an API to abort a GridFS upload stream. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -450,10 +508,92 @@ describe('CSOT spec prose tests', function () { * 1. Call `uploadStream.abort()`. * - Expect this to fail with a timeout error. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['delete'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + await internalClient.db().admin().command(failpoint); + const bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 300 }); + + const data = Buffer.from('01020304', 'hex'); + + const { promise: writePromise, resolve, reject } = promiseWithResolvers(); + uploadStream.on('error', error => uploadStream.destroy(error)); + uploadStream.write(data, error => { + if (error) reject(error); + else resolve(); + }); + let maybeError = await writePromise.then( + () => null, + e => e + ); + expect(maybeError).to.be.null; + + maybeError = await uploadStream.abort().then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + uploadStream.destroy(); }); }); - context.skip('7. GridFS - Download', () => { + context('7. 
GridFS - Download', () => { + let internalClient: MongoClient; + let client: MongoClient; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + + await files.insertOne({ + _id: new ObjectId('000000000000000000000005'), + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + + if (client) { + await client.close(); + } + }); + /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -495,6 +635,27 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against `db.fs.chunks`. */ + it('download streams can be timed out', metadata, async function () { + const bucket = new GridFSBucket(client.db('db')); + const downloadStream = bucket.openDownloadStream(new ObjectId('000000000000000000000005')); + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); }); context('8. 
Server Selection', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index c2e08cfc80a..49ddabc924b 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -8,11 +8,6 @@ const skippedSpecs = { 'change-streams': 'TODO(NODE-6035)', 'convenient-transactions': 'TODO(NODE-5687)', 'deprecated-options': 'TODO(NODE-5689)', - 'gridfs-advanced': 'TODO(NODE-6275)', - 'gridfs-delete': 'TODO(NODE-6275)', - 'gridfs-download': 'TODO(NODE-6275)', - 'gridfs-find': 'TODO(NODE-6275)', - 'gridfs-upload': 'TODO(NODE-6275)', 'tailable-awaitData': 'TODO(NODE-6035)', 'tailable-non-awaitData': 'TODO(NODE-6035)' }; diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 56127cc8ace..b2011ee2e73 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,7 @@ /* Anything javascript specific relating to timeouts */ +import { once } from 'node:events'; +import { Readable } from 'node:stream'; +import { pipeline } from 'node:stream/promises'; import { setTimeout } from 'node:timers/promises'; import { expect } from 'chai'; @@ -15,11 +18,13 @@ import { Connection, type Db, type FindCursor, + GridFSBucket, LEGACY_HELLO_COMMAND, type MongoClient, MongoInvalidArgumentError, MongoOperationTimeoutError, - MongoServerError + MongoServerError, + ObjectId } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -576,6 +581,166 @@ describe('CSOT driver tests', metadata, () => { }); }); + describe('GridFSBucket', () => { + const blockTimeMS = 200; + let internalClient: MongoClient; + let client: MongoClient; + let bucket: GridFSBucket; + + beforeEach(async function () { + client = this.configuration.newClient(undefined, { timeoutMS: 1000 }); + internalClient = this.configuration.newClient(undefined); + }); + + afterEach(async function () { + await client.close(); + await internalClient.db().admin().command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + }); + + context('upload', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + }); + + describe('openUploadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 175 }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + + it('only emits index event once per bucket', metadata, async function () 
{ + let numEventsSeen = 0; + bucket.on('index', () => numEventsSeen++); + + const uploadStream0 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream0.destroy(error)); + const uploadStream1 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream1.destroy(error)); + + const data = Buffer.from('test', 'utf-8'); + await pipeline(Readable.from(data), uploadStream0); + await pipeline(Readable.from(data), uploadStream1); + + expect(numEventsSeen).to.equal(1); + }); + }); + + describe('openUploadStreamWithId', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStreamWithId(new ObjectId(), 'filename', { + timeoutMS: 175 + }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + + context('download', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS + } + }; + const _id = new ObjectId('000000000000000000000005'); + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + await files.insertOne({ + _id, + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(db); + }); + + describe('openDownloadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStream(_id, { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('openDownloadStreamByName', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStreamByName('length-10', { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + }); + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 31414fa4664..a9f79842c31 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -11,6 +11,7 @@ import { CommandStartedEvent, Db, type Document, + GridFSBucket, type MongoClient, MongoError, ReadConcern, @@ -311,7 +312,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { operations.set('drop', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.drop(); + return bucket.drop(operation.arguments); }); 
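// Illustrative aside (not part of the patch): a condensed sketch of the per-operation
// timeoutMS override exercised by the GridFSBucket "can override db timeoutMS settings"
// tests above, which is also why the unified runner's drop handler now forwards
// operation.arguments. The URI, database name, and file name below are placeholders.
import { GridFSBucket, MongoClient } from 'mongodb';

async function downloadWithOverride(uri: string): Promise<Buffer> {
  // Client-level default: operations inherit a 1000ms deadline unless overridden per call.
  const client = new MongoClient(uri, { timeoutMS: 1000 });
  try {
    const bucket = new GridFSBucket(client.db('db'));
    // The 80ms option on this call takes precedence over the inherited 1000ms value, so a
    // sufficiently slow find against the chunks collection surfaces MongoOperationTimeoutError.
    const stream = bucket.openDownloadStreamByName('length-10', { timeoutMS: 80 });
    return Buffer.concat(await stream.toArray());
  } finally {
    await client.close();
  }
}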
operations.set('dropIndexes', async ({ entities, operation }) => { @@ -529,7 +530,8 @@ operations.set('targetedFailPoint', async ({ entities, operation }) => { operations.set('delete', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.delete(operation.arguments!.id); + const { id, ...opts } = operation.arguments; + return bucket.delete(id, opts); }); operations.set('download', async ({ entities, operation }) => { @@ -537,7 +539,8 @@ operations.set('download', async ({ entities, operation }) => { const { id, ...options } = operation.arguments ?? {}; const stream = bucket.openDownloadStream(id, options); - return Buffer.concat(await stream.toArray()); + const data = Buffer.concat(await stream.toArray()); + return data; }); operations.set('downloadByName', async ({ entities, operation }) => { @@ -552,7 +555,6 @@ operations.set('downloadByName', async ({ entities, operation }) => { operations.set('upload', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); const { filename, source, ...options } = operation.arguments ?? {}; - const stream = bucket.openUploadStream(filename, options); const fileStream = Readable.from(Buffer.from(source.$$hexBytes, 'hex')); @@ -832,9 +834,30 @@ operations.set('updateOne', async ({ entities, operation }) => { }); operations.set('rename', async ({ entities, operation }) => { - const collection = entities.getEntity('collection', operation.object); - const { to, ...options } = operation.arguments!; - return collection.rename(to, options); + let entity: GridFSBucket | Collection | undefined; + try { + entity = entities.getEntity('collection', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof Collection) { + const { to, ...options } = operation.arguments!; + return entity.rename(to, options); + } + + try { + entity = entities.getEntity('bucket', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof GridFSBucket) { + const { id, newFilename, ...opts } = operation.arguments!; + return entity.rename(id, newFilename, opts as any); + } + + expect.fail(`No collection or bucket with name '${operation.object}' found`); }); operations.set('createDataKey', async ({ entities, operation }) => { From faae6d559ac774a09b0ea603f00661a4edebf19e Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Fri, 4 Oct 2024 15:16:50 -0600 Subject: [PATCH 086/136] refactor(NODE-6411): AbstractCursor accepts an external timeout context (#4264) --- src/cmap/connection.ts | 6 +- src/cursor/abstract_cursor.ts | 134 ++++++++++++++---- src/index.ts | 1 + src/operations/find.ts | 5 +- src/timeout.ts | 17 +++ .../node_csot.test.ts | 18 ++- .../crud/find_cursor_methods.test.js | 52 +++++-- .../node-specific/abstract_cursor.test.ts | 117 ++++++++++++++- ...er_selection.prose.operation_count.test.ts | 23 +-- test/tools/utils.ts | 32 ++++- 10 files changed, 330 insertions(+), 75 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 507615e9f03..a43d6106c7b 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -422,9 +422,9 @@ export class Connection extends TypedEventEmitter { ...options }; - if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { - const { maxTimeMS } = options.timeoutContext; - if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + if (!options.omitMaxTimeMS) { + const maxTimeMS = options.timeoutContext?.maxTimeMS; 
+ if (maxTimeMS && maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } const message = this.supportsOpMsg diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index d0f386923ad..f7e488d24b2 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,7 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from '../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; -import { TimeoutContext } from '../timeout'; +import { type CSOTTimeoutContext, type Timeout, TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -119,6 +119,14 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { timeoutMS?: number; /** @internal TODO(NODE-5688): make this public */ timeoutMode?: CursorTimeoutMode; + + /** + * @internal + * + * A timeout context to govern the total time the cursor can live. If provided, the cursor + * cannot be used in ITERATION mode. + */ + timeoutContext?: CursorTimeoutContext; } /** @internal */ @@ -171,7 +179,7 @@ export abstract class AbstractCursor< /** @internal */ protected readonly cursorOptions: InternalAbstractCursorOptions; /** @internal */ - protected timeoutContext?: TimeoutContext; + protected timeoutContext?: CursorTimeoutContext; /** @event */ static readonly CLOSE = 'close' as const; @@ -205,20 +213,12 @@ export abstract class AbstractCursor< }; this.cursorOptions.timeoutMS = options.timeoutMS; if (this.cursorOptions.timeoutMS != null) { - if (options.timeoutMode == null) { - if (options.tailable) { - this.cursorOptions.timeoutMode = CursorTimeoutMode.ITERATION; - } else { - this.cursorOptions.timeoutMode = CursorTimeoutMode.LIFETIME; - } - } else { - if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { - throw new MongoInvalidArgumentError( - "Cannot set tailable cursor's timeoutMode to LIFETIME" - ); - } - this.cursorOptions.timeoutMode = options.timeoutMode; + if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { + throw new MongoInvalidArgumentError("Cannot set tailable cursor's timeoutMode to LIFETIME"); } + this.cursorOptions.timeoutMode = + options.timeoutMode ?? + (options.tailable ? CursorTimeoutMode.ITERATION : CursorTimeoutMode.LIFETIME); } else { if (options.timeoutMode != null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); @@ -264,6 +264,17 @@ export abstract class AbstractCursor< utf8: options?.enableUtf8Validation === false ? false : true } }; + + if ( + options.timeoutContext != null && + options.timeoutMS != null && + this.cursorOptions.timeoutMode !== CursorTimeoutMode.LIFETIME + ) { + throw new MongoAPIError( + `cannot create a cursor with an externally provided timeout context that doesn't use timeoutMode=CURSOR_LIFETIME.` + ); + } + this.timeoutContext = options.timeoutContext; } /** @@ -721,6 +732,9 @@ export abstract class AbstractCursor< * if the resultant data has already been retrieved by this cursor. 
*/ rewind(): void { + if (this.timeoutContext && this.timeoutContext.owner !== this) { + throw new MongoAPIError(`Cannot rewind cursor that does not own its timeout context.`); + } if (!this.initialized) { return; } @@ -790,10 +804,13 @@ export abstract class AbstractCursor< */ private async cursorInit(): Promise { if (this.cursorOptions.timeoutMS != null) { - this.timeoutContext = TimeoutContext.create({ - serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, - timeoutMS: this.cursorOptions.timeoutMS - }); + this.timeoutContext ??= new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }), + this + ); } try { const state = await this._initialize(this.cursorSession); @@ -872,6 +889,20 @@ export abstract class AbstractCursor< private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; + const timeoutContextForKillCursors = (): CursorTimeoutContext | undefined => { + if (timeoutMS != null) { + this.timeoutContext?.clear(); + return new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }), + this + ); + } else { + return this.timeoutContext?.refreshed(); + } + }; try { if ( !this.isKilled && @@ -884,23 +915,13 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; - let timeoutContext: TimeoutContext | undefined; - if (timeoutMS != null) { - this.timeoutContext?.clear(); - timeoutContext = TimeoutContext.create({ - serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, - timeoutMS - }); - } else { - this.timeoutContext?.refresh(); - timeoutContext = this.timeoutContext; - } + await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session }), - timeoutContext + timeoutContextForKillCursors() ); } } catch (error) { @@ -1042,3 +1063,54 @@ class ReadableCursorStream extends Readable { } configureResourceManagement(AbstractCursor.prototype); + +/** + * @internal + * The cursor timeout context is a wrapper around a timeout context + * that keeps track of the "owner" of the cursor. For timeout contexts + * instantiated inside a cursor, the owner will be the cursor. + * + * All timeout behavior is exactly the same as the wrapped timeout context's. 
+ */ +export class CursorTimeoutContext extends TimeoutContext { + constructor( + public timeoutContext: TimeoutContext, + public owner: symbol | AbstractCursor + ) { + super(); + } + override get serverSelectionTimeout(): Timeout | null { + return this.timeoutContext.serverSelectionTimeout; + } + override get connectionCheckoutTimeout(): Timeout | null { + return this.timeoutContext.connectionCheckoutTimeout; + } + override get clearServerSelectionTimeout(): boolean { + return this.timeoutContext.clearServerSelectionTimeout; + } + override get clearConnectionCheckoutTimeout(): boolean { + return this.timeoutContext.clearConnectionCheckoutTimeout; + } + override get timeoutForSocketWrite(): Timeout | null { + return this.timeoutContext.timeoutForSocketWrite; + } + override get timeoutForSocketRead(): Timeout | null { + return this.timeoutContext.timeoutForSocketRead; + } + override csotEnabled(): this is CSOTTimeoutContext { + return this.timeoutContext.csotEnabled(); + } + override refresh(): void { + return this.timeoutContext.refresh(); + } + override clear(): void { + return this.timeoutContext.clear(); + } + override get maxTimeMS(): number | null { + return this.timeoutContext.maxTimeMS; + } + + override refreshed(): CursorTimeoutContext { + return new CursorTimeoutContext(this.timeoutContext.refreshed(), this.owner); + } +} diff --git a/src/index.ts b/src/index.ts index e555d97e9ed..a49dc015526 100644 --- a/src/index.ts +++ b/src/index.ts @@ -359,6 +359,7 @@ export type { CursorStreamOptions } from './cursor/abstract_cursor'; export type { + CursorTimeoutContext, InitialCursorResponse, InternalAbstractCursorOptions } from './cursor/abstract_cursor'; diff --git a/src/operations/find.ts b/src/operations/find.ts index 641255553a0..348467acf75 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,6 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; -import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; @@ -17,7 +17,8 @@ import { Aspect, defineAspects, type Hint } from './operation'; */ // eslint-disable-next-line @typescript-eslint/no-unused-vars export interface FindOptions - extends Omit { + extends Omit, + AbstractCursorOptions { /** Sets the limit of documents returned in the query. */ limit?: number; /** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */ diff --git a/src/timeout.ts b/src/timeout.ts index f694b5f4f4f..9041ce4b88d 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -178,6 +178,8 @@ export abstract class TimeoutContext { else throw new MongoRuntimeError('Unrecognized options'); } + abstract get maxTimeMS(): number | null; + abstract get serverSelectionTimeout(): Timeout | null; abstract get connectionCheckoutTimeout(): Timeout | null; @@ -195,6 +197,9 @@ export abstract class TimeoutContext { abstract refresh(): void; abstract clear(): void; + + /** Returns a new instance of the TimeoutContext, with all timeouts refreshed and restarted. */ + abstract refreshed(): TimeoutContext; } /** @internal */ @@ -317,6 +322,10 @@ export class CSOTTimeoutContext extends TimeoutContext { throw new MongoOperationTimeoutError(message ?? 
`Expired after ${this.timeoutMS}ms`); return remainingTimeMS; } + + override refreshed(): CSOTTimeoutContext { + return new CSOTTimeoutContext(this); + } } /** @internal */ @@ -363,4 +372,12 @@ export class LegacyTimeoutContext extends TimeoutContext { clear(): void { return; } + + get maxTimeMS() { + return null; + } + + override refreshed(): LegacyTimeoutContext { + return new LegacyTimeoutContext(this.options); + } } diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b2011ee2e73..f4cfc7d882c 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -26,7 +26,7 @@ import { MongoServerError, ObjectId } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { type FailPoint, waitUntilPoolsFilled } from '../../tools/utils'; const metadata = { requires: { mongodb: '>=4.4' } }; @@ -362,7 +362,7 @@ describe('CSOT driver tests', metadata, () => { }; beforeEach(async function () { - internalClient = this.configuration.newClient(); + internalClient = this.configuration.newClient({}); await internalClient .db('db') .dropCollection('coll') @@ -378,7 +378,11 @@ describe('CSOT driver tests', metadata, () => { await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { monitorCommands: true }); + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize: 10 }); + + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + commandStarted = []; commandSucceeded = []; client.on('commandStarted', ev => commandStarted.push(ev)); @@ -492,7 +496,13 @@ describe('CSOT driver tests', metadata, () => { await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { monitorCommands: true }); + client = this.configuration.newClient(undefined, { + monitorCommands: true, + minPoolSize: 10 + }); + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + commandStarted = []; commandSucceeded = []; client.on('commandStarted', ev => commandStarted.push(ev)); diff --git a/test/integration/crud/find_cursor_methods.test.js b/test/integration/crud/find_cursor_methods.test.js index 42eeda3e816..21a6649bf0b 100644 --- a/test/integration/crud/find_cursor_methods.test.js +++ b/test/integration/crud/find_cursor_methods.test.js @@ -1,7 +1,13 @@ 'use strict'; const { expect } = require('chai'); const { filterForCommands } = require('../shared'); -const { promiseWithResolvers, MongoCursorExhaustedError } = require('../../mongodb'); +const { + promiseWithResolvers, + MongoCursorExhaustedError, + CursorTimeoutContext, + TimeoutContext, + MongoAPIError +} = require('../../mongodb'); describe('Find Cursor', function () { let client; @@ -246,23 +252,45 @@ describe('Find Cursor', function () { }); context('#rewind', function () { - it('should rewind a cursor', function (done) { + it('should rewind a cursor', async function () { const coll = client.db().collection('abstract_cursor'); const cursor = coll.find({}); - this.defer(() => cursor.close()); - cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + try { + let docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); cursor.rewind(); - 
cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); + } finally { + await cursor.close(); + } + }); - done(); - }); - }); + it('throws if the cursor does not own its timeoutContext', async function () { + const coll = client.db().collection('abstract_cursor'); + const cursor = coll.find( + {}, + { + timeoutContext: new CursorTimeoutContext( + TimeoutContext.create({ + timeoutMS: 1000, + serverSelectionTimeoutMS: 1000 + }), + Symbol() + ) + } + ); + + try { + cursor.rewind(); + expect.fail(`rewind should have thrown.`); + } catch (error) { + expect(error).to.be.instanceOf(MongoAPIError); + } finally { + await cursor.close(); + } }); it('should end an implicit session on rewind', { diff --git a/test/integration/node-specific/abstract_cursor.test.ts b/test/integration/node-specific/abstract_cursor.test.ts index a5e7fba13dd..136e72a3499 100644 --- a/test/integration/node-specific/abstract_cursor.test.ts +++ b/test/integration/node-specific/abstract_cursor.test.ts @@ -7,12 +7,17 @@ import { inspect } from 'util'; import { AbstractCursor, type Collection, + CursorTimeoutContext, + CursorTimeoutMode, type FindCursor, MongoAPIError, type MongoClient, MongoCursorExhaustedError, - MongoServerError + MongoOperationTimeoutError, + MongoServerError, + TimeoutContext } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; describe('class AbstractCursor', function () { describe('regression tests NODE-5372', function () { @@ -395,4 +400,114 @@ describe('class AbstractCursor', function () { expect(nextSpy.callCount).to.be.lessThan(numDocuments); }); }); + + describe('externally provided timeout contexts', function () { + let client: MongoClient; + let collection: Collection; + let context: CursorTimeoutContext; + + beforeEach(async function () { + client = this.configuration.newClient(); + + collection = client.db('abstract_cursor_integration').collection('test'); + + context = new CursorTimeoutContext( + TimeoutContext.create({ timeoutMS: 1000, serverSelectionTimeoutMS: 2000 }), + Symbol() + ); + + await collection.insertMany([{ a: 1 }, { b: 2 }, { c: 3 }]); + }); + + afterEach(async function () { + await collection.deleteMany({}); + await client.close(); + }); + + describe('when timeoutMode != LIFETIME', function () { + it('an error is thrown', function () { + expect(() => + collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.ITERATION } + ) + ).to.throw( + `cannot create a cursor with an externally provided timeout context that doesn't use timeoutMode=CURSOR_LIFETIME` + ); + }); + }); + + describe('when timeoutMode is omitted', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find({}, { timeoutContext: context, timeoutMS: 1000 }); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when timeoutMode is LIFETIME', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + // @ts-expect-error Private access. 
+ expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor is initialized', function () { + it('the provided timeoutContext is not overwritten', async function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + await cursor.toArray(); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor refreshes the timeout for killCursors', function () { + it( + 'the provided timeoutContext is not modified', + { + requires: { + mongodb: '>=4.4' + } + }, + async function () { + await client.db('admin').command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 5000 + } + } as FailPoint); + + const cursor = collection.find( + {}, + { + timeoutContext: context, + timeoutMS: 1000, + timeoutMode: CursorTimeoutMode.LIFETIME, + batchSize: 1 + } + ); + + const error = await cursor.toArray().catch(e => e); + + expect(error).to.be.instanceof(MongoOperationTimeoutError); + // @ts-expect-error We know we have a CSOT timeout context but TS does not. + expect(context.timeoutContext.remainingTimeMS).to.be.lessThan(0); + } + ); + }); + }); }); diff --git a/test/integration/server-selection/server_selection.prose.operation_count.test.ts b/test/integration/server-selection/server_selection.prose.operation_count.test.ts index fec6d24e61c..b4a7d9bf47b 100644 --- a/test/integration/server-selection/server_selection.prose.operation_count.test.ts +++ b/test/integration/server-selection/server_selection.prose.operation_count.test.ts @@ -1,5 +1,4 @@ import { expect } from 'chai'; -import { on } from 'events'; import { type Collection, @@ -7,7 +6,7 @@ import { HostAddress, type MongoClient } from '../../mongodb'; -import { sleep } from '../../tools/utils'; +import { waitUntilPoolsFilled } from '../../tools/utils'; const failPoint = { configureFailPoint: 'failCommand', @@ -28,17 +27,6 @@ async function runTaskGroup(collection: Collection, count: 10 | 100 | 1000) { } } -async function ensurePoolIsFull(client: MongoClient): Promise { - let connectionCount = 0; - - for await (const _event of on(client, 'connectionCreated')) { - connectionCount++; - if (connectionCount === POOL_SIZE * 2) { - break; - } - } -} - // Step 1: Configure a sharded cluster with two mongoses. Use a 4.2.9 or newer server version. 
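// Illustrative aside (not part of the patch): a condensed version of the externally provided
// timeout context usage covered by the abstract_cursor tests above. Imports mirror those
// tests' re-export of driver internals ('../../mongodb'); CursorTimeoutContext and
// TimeoutContext are internal APIs, and the collection passed in is a placeholder.
import { type Collection, CursorTimeoutContext, TimeoutContext } from '../../mongodb';

async function toArrayWithExternalDeadline(collection: Collection): Promise<unknown[]> {
  // A Symbol owner means the cursor does not own this context: rewind() is rejected, and
  // killCursors runs against a refreshed() copy rather than mutating the caller's deadline.
  const context = new CursorTimeoutContext(
    TimeoutContext.create({ timeoutMS: 1000, serverSelectionTimeoutMS: 2000 }),
    Symbol()
  );
  // Omitting timeoutMode defaults to CURSOR_LIFETIME; other modes throw when an external
  // timeout context is supplied.
  const cursor = collection.find({}, { timeoutContext: context, timeoutMS: 1000 });
  return await cursor.toArray();
}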
const TEST_METADATA: MongoDBMetadataUI = { requires: { mongodb: '>=4.2.9', topology: 'sharded' } }; @@ -75,15 +63,8 @@ describe('operationCount-based Selection Within Latency Window - Prose Test', fu client.on('commandStarted', updateCount); - const poolIsFullPromise = ensurePoolIsFull(client); - - await client.connect(); - // Step 4: Using CMAP events, ensure the client's connection pools for both mongoses have been saturated - const poolIsFull = Promise.race([poolIsFullPromise, sleep(30 * 1000)]); - if (!poolIsFull) { - throw new Error('Timed out waiting for connection pool to fill to minPoolSize'); - } + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), POOL_SIZE * 2); seeds = client.topology.s.seedlist.map(address => address.toString()); diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 3cb50d2cd51..8614bd7d64c 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -1,5 +1,5 @@ import * as child_process from 'node:child_process'; -import { once } from 'node:events'; +import { on, once } from 'node:events'; import * as fs from 'node:fs/promises'; import * as path from 'node:path'; @@ -568,3 +568,33 @@ export async function itInNodeProcess( } }); } + +/** + * Connects the client and waits until `client` has emitted `count` connectionCreated events. + * + * **This will hang if the client does not have a maxPoolSizeSet!** + * + * This is useful when you want to ensure that the client has pools that are full of connections. + * + * This does not guarantee that all pools that the client has are completely full unless + * count = number of servers to which the client is connected * maxPoolSize. But it can + * serve as a way to ensure that some connections have been established and are in the pools. + */ +export async function waitUntilPoolsFilled( + client: MongoClient, + signal: AbortSignal, + count: number = client.s.options.maxPoolSize +): Promise { + let connectionCount = 0; + + async function wait$() { + for await (const _event of on(client, 'connectionCreated', { signal })) { + connectionCount++; + if (connectionCount >= count) { + break; + } + } + } + + await Promise.all([wait$(), client.connect()]); +} From af6a4e2c675d4d325d6312c93e7788e44f587ad0 Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 7 Oct 2024 13:07:46 -0400 Subject: [PATCH 087/136] feat(NODE-6305): Add CSOT support to tailable cursors (#4218) Co-authored-by: Neal Beeken --- src/cursor/abstract_cursor.ts | 51 +++- src/cursor/run_command_cursor.ts | 2 + src/mongo_client.ts | 5 + src/operations/create_collection.ts | 1 + test/benchmarks/driverBench/common.js | 4 +- ...ient_side_operations_timeout.prose.test.ts | 40 ++-- ...lient_side_operations_timeout.spec.test.ts | 7 +- .../node_csot.test.ts | 221 +++++++++++++++++- .../tailable-awaitData.json | 146 ++++++++++++ .../tailable-non-awaitData.json | 151 ++++++++++++ test/tools/unified-spec-runner/operations.ts | 45 +++- 11 files changed, 641 insertions(+), 32 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index f7e488d24b2..255a977a5f9 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -209,12 +209,35 @@ export abstract class AbstractCursor< options.readPreference && options.readPreference instanceof ReadPreference ? 
options.readPreference : ReadPreference.primary, - ...pluckBSONSerializeOptions(options) + ...pluckBSONSerializeOptions(options), + timeoutMS: options.timeoutMS, + tailable: options.tailable, + awaitData: options.awaitData }; - this.cursorOptions.timeoutMS = options.timeoutMS; if (this.cursorOptions.timeoutMS != null) { - if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { - throw new MongoInvalidArgumentError("Cannot set tailable cursor's timeoutMode to LIFETIME"); + if (options.timeoutMode == null) { + if (options.tailable) { + this.cursorOptions.timeoutMode = CursorTimeoutMode.ITERATION; + + if (options.awaitData) { + if ( + options.maxAwaitTimeMS != null && + options.maxAwaitTimeMS >= this.cursorOptions.timeoutMS + ) + throw new MongoInvalidArgumentError( + 'Cannot specify maxAwaitTimeMS >= timeoutMS for a tailable awaitData cursor' + ); + } + } else { + this.cursorOptions.timeoutMode = CursorTimeoutMode.LIFETIME; + } + } else { + if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { + throw new MongoInvalidArgumentError( + "Cannot set tailable cursor's timeoutMode to LIFETIME" + ); + } + this.cursorOptions.timeoutMode = options.timeoutMode; } this.cursorOptions.timeoutMode = options.timeoutMode ?? @@ -223,6 +246,8 @@ export abstract class AbstractCursor< if (options.timeoutMode != null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); } + + // Set for initial command this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null && ((this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && @@ -781,15 +806,17 @@ export abstract class AbstractCursor< 'Unexpected null selectedServer. A cursor creating command should have set this' ); } + const getMoreOptions = { + ...this.cursorOptions, + session: this.cursorSession, + batchSize + }; + const getMoreOperation = new GetMoreOperation( this.cursorNamespace, this.cursorId, this.selectedServer, - { - ...this.cursorOptions, - session: this.cursorSession, - batchSize - } + getMoreOptions ); return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); @@ -814,6 +841,8 @@ export abstract class AbstractCursor< } try { const state = await this._initialize(this.cursorSession); + // Set omitMaxTimeMS to the value needed for subsequent getMore calls + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; const response = state.response; this.selectedServer = state.server; this.cursorId = response.id; @@ -866,9 +895,9 @@ export abstract class AbstractCursor< } catch (error) { try { await this.cleanup(undefined, error); - } catch (error) { + } catch (cleanupError) { // `cleanupCursor` should never throw, squash and throw the original error - squashError(error); + squashError(cleanupError); } throw error; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 6b31ce2263a..90e4a94fd42 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -23,6 +23,8 @@ export type RunCursorCommandOptions = { timeoutMS?: number; /** @internal */ timeoutMode?: CursorTimeoutMode; + tailable?: boolean; + awaitData?: boolean; } & BSONSerializeOptions; /** @public */ diff --git a/src/mongo_client.ts b/src/mongo_client.ts index 49201910362..cb66fb0bfd2 100644 --- a/src/mongo_client.ts +++ b/src/mongo_client.ts @@ -483,6 +483,11 @@ export class MongoClient extends TypedEventEmitter implements return this.s.bsonOptions; } + /** @internal */ 
+ get timeoutMS(): number | undefined { + return this.options.timeoutMS; + } + /** * Executes a client bulk write operation, available on server 8.0+. * @param models - The client bulk write models. diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index afb2680b9a0..293ecc8be52 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -17,6 +17,7 @@ import { Aspect, defineAspects } from './operation'; const ILLEGAL_COMMAND_FIELDS = new Set([ 'w', 'wtimeout', + 'timeoutMS', 'j', 'fsync', 'autoIndexId', diff --git a/test/benchmarks/driverBench/common.js b/test/benchmarks/driverBench/common.js index bb5b48babfd..3ffd309572a 100644 --- a/test/benchmarks/driverBench/common.js +++ b/test/benchmarks/driverBench/common.js @@ -24,7 +24,9 @@ function loadSpecString(filePath) { } function makeClient() { - this.client = new MongoClient(process.env.MONGODB_URI || 'mongodb://127.0.0.1:27017'); + this.client = new MongoClient(process.env.MONGODB_URI || 'mongodb://127.0.0.1:27017', { + timeoutMS: 0 + }); } function connectClient() { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1b8c34633b4..09b95d6dff0 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -77,7 +77,7 @@ describe('CSOT spec prose tests', function () { beforeEach(async function () { await internalClient .db('db') - .collection('coll') + .collection('bulkWriteTest') .drop() .catch(() => null); await internalClient.db('admin').command(failpoint); @@ -93,7 +93,7 @@ describe('CSOT spec prose tests', function () { const oneMBDocs = Array.from({ length: 50 }, (_, _id) => ({ _id, a })); const error = await client .db('db') - .collection<{ _id: number; a: Uint8Array }>('coll') + .collection<{ _id: number; a: Uint8Array }>('bulkWriteTest') .insertMany(oneMBDocs) .catch(error => error); @@ -265,6 +265,7 @@ describe('CSOT spec prose tests', function () { }); context('5. 
Blocking Iteration Methods', () => { + const metadata = { requires: { mongodb: '>=4.4' } }; /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -276,7 +277,7 @@ describe('CSOT spec prose tests', function () { data: { failCommands: ['getMore'], blockConnection: true, - blockTimeMS: 20 + blockTimeMS: 90 } }; let internalClient: MongoClient; @@ -286,7 +287,11 @@ describe('CSOT spec prose tests', function () { beforeEach(async function () { internalClient = this.configuration.newClient(); - await internalClient.db('db').dropCollection('coll'); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); // Creating capped collection to be able to create tailable find cursor const coll = await internalClient .db('db') @@ -294,7 +299,13 @@ describe('CSOT spec prose tests', function () { await coll.insertOne({ x: 1 }); await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + client = this.configuration.newClient(undefined, { + monitorCommands: true, + timeoutMS: 100, + minPoolSize: 20 + }); + await client.connect(); + commandStarted = []; commandSucceeded = []; @@ -337,11 +348,11 @@ describe('CSOT spec prose tests', function () { * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ - it.skip('send correct number of finds and getMores', async function () { + it('send correct number of finds and getMores', metadata, async function () { const cursor = client .db('db') .collection('coll') - .find({}, { tailable: true, awaitData: true }) + .find({}, { tailable: true }) .project({ _id: 0 }); const doc = await cursor.next(); expect(doc).to.deep.equal({ x: 1 }); @@ -358,7 +369,7 @@ describe('CSOT spec prose tests', function () { expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); // Expect 2 getMore expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); - }).skipReason = 'TODO(NODE-6305)'; + }); }); context('Change Streams', () => { @@ -383,8 +394,11 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ - it.skip('sends correct number of aggregate and getMores', async function () { - const changeStream = client.db('db').collection('coll').watch(); + it.skip('sends correct number of aggregate and getMores', metadata, async function () { + const changeStream = client + .db('db') + .collection('coll') + .watch([], { timeoutMS: 20, maxAwaitTimeMS: 19 }); const maybeError = await changeStream.next().then( () => null, e => e @@ -397,9 +411,9 @@ describe('CSOT spec prose tests', function () { const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); // Expect 1 aggregate expect(aggregates).to.have.lengthOf(1); - // Expect 1 getMore - expect(getMores).to.have.lengthOf(1); - }).skipReason = 'TODO(NODE-6305)'; + // Expect 2 getMores + expect(getMores).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6387)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 49ddabc924b..d72e9bc5ebe 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -25,7 +25,12 @@ const skippedTests = { 'Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset': 'TODO(DRIVERS-2965)', 'maxTimeMS value in the command is less than timeoutMS': - 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(DRIVERS-2965)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(DRIVERS-2965)', + 'timeoutMS is refreshed for getMore - failure': + 'TODO(DRIVERS-2965): see modified test in unified-csot-node-specs' // Skipping for both tailable awaitData and tailable non-awaitData cursors }; describe('CSOT spec tests', function () { diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index f4cfc7d882c..b1516454cc7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -31,13 +31,18 @@ import { type FailPoint, waitUntilPoolsFilled } from '../../tools/utils'; const metadata = { requires: { mongodb: '>=4.4' } }; describe('CSOT driver tests', metadata, () => { + // NOTE: minPoolSize here is set to ensure that connections are available when testing timeout + // behaviour. 
This reduces flakiness in our tests since operations will not spend time + // establishing connections, more closely mirroring long-running application behaviour + const minPoolSize = 20; + describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; let coll: Collection; beforeEach(async function () { - client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + client = this.configuration.newClient(undefined, { timeoutMS: 100, minPoolSize }); db = client.db('test', { timeoutMS: 200 }); }); @@ -159,7 +164,10 @@ describe('CSOT driver tests', metadata, () => { metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; - client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + client = this.configuration.newClient(undefined, { + timeoutMS: 1, + monitorCommands: true + }); client.on('commandStarted', ev => commandsStarted.push(ev)); @@ -591,6 +599,211 @@ describe('CSOT driver tests', metadata, () => { }); }); + describe('Tailable cursors', function () { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['aggregate', 'find', 'getMore'], + blockConnection: true, + blockTimeMS: 100 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + + await internalClient.db('db').createCollection('coll', { capped: true, size: 1_000_000 }); + + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 100 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize }); + commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client.connect(); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('when in ITERATION mode', function () { + context('awaitData cursors', function () { + let cursor: FindCursor; + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, awaitData: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, tailable: true, awaitData: true, batchSize: 1 }); + for (let i = 0; i < 5; i++) { + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + } + }); + + it('does not use timeoutMS to compute 
maxTimeMS for getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 10_000, tailable: true, awaitData: true, batchSize: 1 }); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.not.haveOwnProperty('maxTimeMS'); + }); + + context('when maxAwaitTimeMS is specified', function () { + it( + 'sets maxTimeMS to the configured maxAwaitTimeMS value on getMores', + metadata, + async function () { + cursor = client.db('db').collection('coll').find( + {}, + { + timeoutMS: 10_000, + tailable: true, + awaitData: true, + batchSize: 1, + maxAwaitTimeMS: 100 + } + ); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.haveOwnProperty('maxTimeMS'); + expect(getMore.maxTimeMS).to.equal(100); + } + ); + }); + }); + + context('non-awaitData cursors', function () { + let cursor: FindCursor; + + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, tailable: true, batchSize: 1 }); + for (let i = 0; i < 5; i++) { + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + } + }); + + it('does not append a maxTimeMS field to original command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + + const finds = commandStarted.filter(x => x.command.find != null); + expect(finds).to.have.lengthOf(1); + expect(finds[0].command.find).to.exist; + expect(finds[0].command.maxTimeMS).to.not.exist; + }); + it('does not append a maxTimeMS field to subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted.filter(x => x.command.getMore != null); + + expect(getMores).to.have.lengthOf(1); + expect(getMores[0].command.getMore).to.exist; + expect(getMores[0].command.getMore.maxTimeMS).to.not.exist; + }); + }); + }); + }); + describe('GridFSBucket', () => { const blockTimeMS = 200; let internalClient: MongoClient; @@ -798,6 +1011,10 @@ describe('CSOT driver tests', metadata, () => { beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 123 }); + await client + .db('db') + .dropCollection('coll') + .catch(() => null); }); afterEach(async function () { diff 
--git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json new file mode 100644 index 00000000000..17da3e3c0c9 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json @@ -0,0 +1,146 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json new file mode 100644 index 00000000000..80cf74a1116 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json @@ -0,0 +1,151 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": 
"collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index a9f79842c31..f7c34a70239 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -268,7 +268,18 @@ operations.set('createCollection', async ({ entities, operation }) => { operations.set('createFindCursor', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.find(filter, opts); // The spec dictates that we create the cursor and force the find command // to execute, but don't move the cursor forward. 
hasNext() accomplishes @@ -332,7 +343,18 @@ operations.set('find', async ({ entities, operation }) => { } else { queryable = entities.getEntity('collection', operation.object); } - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } return queryable.find(filter, opts).toArray(); }); @@ -804,10 +826,25 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc operations.set('createCommandCursor', async ({ entities, operation }: OperationFunctionParams) => { const collection = entities.getEntity('db', operation.object); - const { command, ...opts } = operation.arguments!; + const { command, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + tailable: opts.tailable, + awaitData: opts.awaitData, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); From a645d9f0fa8740e22164ef703aea04669c50b141 Mon Sep 17 00:00:00 2001 From: Aditi Khare <106987683+aditi-khare-mongoDB@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:37:08 -0400 Subject: [PATCH 088/136] feat(NODE-6389): add support for timeoutMS in StateMachine.execute() (#4243) Co-authored-by: Warren James Co-authored-by: Neal Beeken Co-authored-by: Bailey Pearson --- src/client-side-encryption/state_machine.ts | 88 +++++++---- src/sdam/server.ts | 4 + ...ient_side_operations_timeout.prose.test.ts | 87 +++++++++-- ...lient_side_operations_timeout.unit.test.ts | 104 +++++++++++-- .../state_machine.test.ts | 143 +++++++++++++++++- 5 files changed, 371 insertions(+), 55 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index af3ea4c215d..f47ee191b54 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -12,7 +12,9 @@ import { } from '../bson'; import { type ProxyOptions } from '../cmap/connection'; import { getSocks, type SocksLib } from '../deps'; +import { MongoOperationTimeoutError } from '../error'; import { type MongoClient, type MongoClientOptions } from '../mongo_client'; +import { Timeout, type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, MongoDBCollectionNamespace, promiseWithResolvers } from '../utils'; import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; @@ -173,6 +175,7 @@ export type StateMachineOptions = { * An internal class that executes across a MongoCryptContext until either * a finishing state or an error is reached. Do not instantiate directly. 
*/ +// TODO(DRIVERS-2671): clarify CSOT behavior for FLE APIs export class StateMachine { constructor( private options: StateMachineOptions, @@ -182,7 +185,11 @@ export class StateMachine { /** * Executes the state machine according to the specification */ - async execute(executor: StateMachineExecutable, context: MongoCryptContext): Promise { + async execute( + executor: StateMachineExecutable, + context: MongoCryptContext, + timeoutContext?: TimeoutContext + ): Promise { const keyVaultNamespace = executor._keyVaultNamespace; const keyVaultClient = executor._keyVaultClient; const metaDataClient = executor._metaDataClient; @@ -201,8 +208,13 @@ export class StateMachine { 'unreachable state machine state: entered MONGOCRYPT_CTX_NEED_MONGO_COLLINFO but metadata client is undefined' ); } - const collInfo = await this.fetchCollectionInfo(metaDataClient, context.ns, filter); + const collInfo = await this.fetchCollectionInfo( + metaDataClient, + context.ns, + filter, + timeoutContext + ); if (collInfo) { context.addMongoOperationResponse(collInfo); } @@ -222,9 +234,9 @@ export class StateMachine { // When we are using the shared library, we don't have a mongocryptd manager. const markedCommand: Uint8Array = mongocryptdManager ? await mongocryptdManager.withRespawn( - this.markCommand.bind(this, mongocryptdClient, context.ns, command) + this.markCommand.bind(this, mongocryptdClient, context.ns, command, timeoutContext) ) - : await this.markCommand(mongocryptdClient, context.ns, command); + : await this.markCommand(mongocryptdClient, context.ns, command, timeoutContext); context.addMongoOperationResponse(markedCommand); context.finishMongoOperation(); @@ -233,7 +245,12 @@ export class StateMachine { case MONGOCRYPT_CTX_NEED_MONGO_KEYS: { const filter = context.nextMongoOperation(); - const keys = await this.fetchKeys(keyVaultClient, keyVaultNamespace, filter); + const keys = await this.fetchKeys( + keyVaultClient, + keyVaultNamespace, + filter, + timeoutContext + ); if (keys.length === 0) { // See docs on EMPTY_V @@ -255,9 +272,7 @@ export class StateMachine { } case MONGOCRYPT_CTX_NEED_KMS: { - const requests = Array.from(this.requests(context)); - await Promise.all(requests); - + await Promise.all(this.requests(context, timeoutContext)); context.finishKMSRequests(); break; } @@ -299,7 +314,7 @@ export class StateMachine { * @param kmsContext - A C++ KMS context returned from the bindings * @returns A promise that resolves when the KMS reply has be fully parsed */ - async kmsRequest(request: MongoCryptKMSRequest): Promise { + async kmsRequest(request: MongoCryptKMSRequest, timeoutContext?: TimeoutContext): Promise { const parsedUrl = request.endpoint.split(':'); const port = parsedUrl[1] != null ? 
Number.parseInt(parsedUrl[1], 10) : HTTPS_PORT; const socketOptions = autoSelectSocketOptions(this.options.socketOptions || {}); @@ -329,10 +344,6 @@ export class StateMachine { } } - function ontimeout() { - return new MongoCryptError('KMS request timed out'); - } - function onerror(cause: Error) { return new MongoCryptError('KMS request failed', { cause }); } @@ -364,7 +375,6 @@ export class StateMachine { resolve: resolveOnNetSocketConnect } = promiseWithResolvers(); netSocket - .once('timeout', () => rejectOnNetSocketError(ontimeout())) .once('error', err => rejectOnNetSocketError(onerror(err))) .once('close', () => rejectOnNetSocketError(onclose())) .once('connect', () => resolveOnNetSocketConnect()); @@ -410,8 +420,8 @@ export class StateMachine { reject: rejectOnTlsSocketError, resolve } = promiseWithResolvers(); + socket - .once('timeout', () => rejectOnTlsSocketError(ontimeout())) .once('error', err => rejectOnTlsSocketError(onerror(err))) .once('close', () => rejectOnTlsSocketError(onclose())) .on('data', data => { @@ -425,20 +435,26 @@ export class StateMachine { resolve(); } }); - await willResolveKmsRequest; + await (timeoutContext?.csotEnabled() + ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) + : willResolveKmsRequest); + } catch (error) { + if (error instanceof TimeoutError) + throw new MongoOperationTimeoutError('KMS request timed out'); + throw error; } finally { // There's no need for any more activity on this socket at this point. destroySockets(); } } - *requests(context: MongoCryptContext) { + *requests(context: MongoCryptContext, timeoutContext?: TimeoutContext) { for ( let request = context.nextKMSRequest(); request != null; request = context.nextKMSRequest() ) { - yield this.kmsRequest(request); + yield this.kmsRequest(request, timeoutContext); } } @@ -498,7 +514,8 @@ export class StateMachine { async fetchCollectionInfo( client: MongoClient, ns: string, - filter: Document + filter: Document, + timeoutContext?: TimeoutContext ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); @@ -506,7 +523,10 @@ export class StateMachine { .db(db) .listCollections(filter, { promoteLongs: false, - promoteValues: false + promoteValues: false, + ...(timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {}) }) .toArray(); @@ -522,12 +542,22 @@ export class StateMachine { * @param command - The command to execute. * @param callback - Invoked with the serialized and marked bson command, or with an error */ - async markCommand(client: MongoClient, ns: string, command: Uint8Array): Promise { - const options = { promoteLongs: false, promoteValues: false }; + async markCommand( + client: MongoClient, + ns: string, + command: Uint8Array, + timeoutContext?: TimeoutContext + ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - const rawCommand = deserialize(command, options); + const bsonOptions = { promoteLongs: false, promoteValues: false }; + const rawCommand = deserialize(command, bsonOptions); - const response = await client.db(db).command(rawCommand, options); + const response = await client.db(db).command(rawCommand, { + ...bsonOptions, + ...(timeoutContext?.csotEnabled() + ? 
{ timeoutMS: timeoutContext?.remainingTimeMS } + : undefined) + }); return serialize(response, this.bsonOptions); } @@ -543,7 +573,8 @@ export class StateMachine { fetchKeys( client: MongoClient, keyVaultNamespace: string, - filter: Uint8Array + filter: Uint8Array, + timeoutContext?: TimeoutContext ): Promise> { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); @@ -551,7 +582,12 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter)) + .find( + deserialize(filter), + timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {} + ) .toArray(); } } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 08325086d53..7ab2d9a043f 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,6 +311,10 @@ export class Server extends TypedEventEmitter { delete finalOptions.readPreference; } + if (this.description.iscryptd) { + finalOptions.omitMaxTimeMS = true; + } + const session = finalOptions.session; let conn = session?.pinnedConnection; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 09b95d6dff0..80da92e10a3 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,5 +1,7 @@ /* Specification prose tests */ +import { type ChildProcess, spawn } from 'node:child_process'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -16,7 +18,8 @@ import { MongoServerSelectionError, now, ObjectId, - promiseWithResolvers + promiseWithResolvers, + squashError } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -103,17 +106,55 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { - /** - * This test MUST only be run against enterprise server versions 4.2 and higher. - * - * 1. Launch a mongocryptd process on 23000. - * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. - * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. - * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. - */ - }); + context( + '2. maxTimeMS is not set for commands sent to mongocryptd', + { requires: { mongodb: '>=4.2' } }, + () => { + /** + * This test MUST only be run against enterprise server versions 4.2 and higher. + * + * 1. Launch a mongocryptd process on 23000. + * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. + * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. + * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. 
+ */ + + let client: MongoClient; + const mongocryptdTestPort = '23000'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}/?timeoutMS=1000`, { + monitorCommands: true + }); + }); + + afterEach(async function () { + await client.close(); + childProcess.kill('SIGKILL'); + sinon.restore(); + }); + + it('maxTimeMS is not set', async function () { + const commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client + .db('admin') + .command({ ping: 1 }) + .catch(e => squashError(e)); + expect(commandStarted).to.have.lengthOf(1); + expect(commandStarted[0].command).to.not.have.property('maxTimeMS'); + }); + } + ); + // TODO(NODE-6391): Add timeoutMS support to Explicit Encryption context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, @@ -720,6 +761,30 @@ describe('CSOT spec prose tests', function () { 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. 
diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 944d9b96048..7387099a7f1 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -6,8 +6,22 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; - -import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; +import { setTimeout } from 'timers'; +import { TLSSocket } from 'tls'; +import { promisify } from 'util'; + +// eslint-disable-next-line @typescript-eslint/no-restricted-imports +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { + ConnectionPool, + CSOTTimeoutContext, + type MongoClient, + MongoOperationTimeoutError, + Timeout, + TimeoutContext, + Topology +} from '../../mongodb'; +import { createTimerSandbox } from '../../unit/timer_sandbox'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -93,17 +107,83 @@ describe('CSOT spec unit tests', function () { }).skipReason = 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; - context.skip('Client side encryption', function () { - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); + describe('Client side encryption', function () { + describe('KMS requests', function () { + const stateMachine = new StateMachine({} as any); + const request = { + addResponse: _response => {}, + status: { + type: 1, + code: 1, + message: 'notARealStatus' + }, + bytesNeeded: 500, + kmsProvider: 'notRealAgain', + endpoint: 'fake', + message: Buffer.from('foobar') + }; + + context('when StateMachine.kmsRequest() is passed a `CSOTimeoutContext`', function () { + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) {}); + }); + + afterEach(async function () { + sinon.restore(); + }); + + it('the kms request times out through remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + const err = await stateMachine.kmsRequest(request, timeoutContext).catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(err.errmsg).to.equal('KMS request timed out'); + }); + }); + + context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + + let sleep; + + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { + clock.tick(30000); + }); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + }); + + afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } + sinon.restore(); + }); + + it('the kms request does not timeout within 30 seconds', async function () { + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch(e => e); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); + }); 
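The two unit tests above exercise the behaviour PATCH 088 adds to StateMachine.kmsRequest: with a CSOTTimeoutContext the KMS socket work is bounded by remainingTimeMS and surfaces as MongoOperationTimeoutError('KMS request timed out'), while without a context the request may legitimately run past 30 seconds. The standalone sketch below shows the underlying deadline pattern in isolation; raceWithRemainingTime and DeadlineError are illustrative names only, and plain timers with Promise.race stand in for the driver's internal Timeout helper.

// Sketch only, not driver code: bound an arbitrary promise by a remaining-time budget.
class DeadlineError extends Error {
  constructor(ms: number) {
    super(`operation exceeded the remaining ${ms}ms budget`);
  }
}

async function raceWithRemainingTime<T>(work: Promise<T>, remainingTimeMS: number): Promise<T> {
  let timer: NodeJS.Timeout | undefined;
  const deadline = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new DeadlineError(remainingTimeMS)), remainingTimeMS);
  });
  try {
    // Whichever settles first wins; an expired budget rejects with DeadlineError.
    return await Promise.race([work, deadline]);
  } finally {
    clearTimeout(timer); // always release the timer so the process can exit promptly
  }
}

// Usage: a "KMS call" that takes 1000ms against a 250ms remaining budget times out.
const slowKmsCall = new Promise<string>(resolve => setTimeout(() => resolve('reply'), 1000));
raceWithRemainingTime(slowKmsCall, 250).catch(error => {
  console.log(error instanceof DeadlineError); // true
});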
+ }); + }); - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + // TODO(NODE-6390): Add timeoutMS support to Auto Encryption + it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => {}); + }); context.skip('Background Connection Pooling', function () { context( diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index 77f3cf3a824..95bb6056355 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -12,9 +12,17 @@ import * as tls from 'tls'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { Db } from '../../../src/db'; -// eslint-disable-next-line @typescript-eslint/no-restricted-imports -import { MongoClient } from '../../../src/mongo_client'; -import { Int32, Long, serialize } from '../../mongodb'; +import { + BSON, + Collection, + CSOTTimeoutContext, + Int32, + Long, + MongoClient, + serialize, + squashError +} from '../../mongodb'; +import { sleep } from '../../tools/utils'; describe('StateMachine', function () { class MockRequest implements MongoCryptKMSRequest { @@ -74,12 +82,10 @@ describe('StateMachine', function () { const options = { promoteLongs: false, promoteValues: false }; const serializedCommand = serialize(command); const stateMachine = new StateMachine({} as any); - // eslint-disable-next-line @typescript-eslint/no-empty-function - const callback = () => {}; context('when executing the command', function () { it('does not promote values', function () { - stateMachine.markCommand(clientStub, 'test.coll', serializedCommand, callback); + stateMachine.markCommand(clientStub, 'test.coll', serializedCommand); expect(runCommandStub.calledWith(command, options)).to.be.true; }); }); @@ -461,4 +467,129 @@ describe('StateMachine', function () { expect.fail('missed exception'); }); }); + + describe('CSOT', function () { + describe('#fetchKeys', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let findSpy; + + beforeEach(async function () { + findSpy = sinon.spy(Collection.prototype, 'find'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.fetchKeys() is passed a `CSOTimeoutContext`', function () { + it('collection.find runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + }); + + context('when StateMachine.fetchKeys() is not passed a `CSOTimeoutContext`', function () { + it('collection.find runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + 
expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#markCommand', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let dbCommandSpy; + + beforeEach(async function () { + dbCommandSpy = sinon.spy(Db.prototype, 'command'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.markCommand() is passed a `CSOTimeoutContext`', function () { + it('db.command runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + }); + + context('when StateMachine.markCommand() is not passed a `CSOTimeoutContext`', function () { + it('db.command runs with an undefined timeoutMS property', async function () { + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#fetchCollectionInfo', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let listCollectionsSpy; + + beforeEach(async function () { + listCollectionsSpy = sinon.spy(Db.prototype, 'listCollections'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context( + 'when StateMachine.fetchCollectionInfo() is passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + } + ); + + context( + 'when StateMachine.fetchCollectionInfo() is not passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + } + ); + }); + }); }); From c0d6ec9a0d0a0bae520dc20abb13115c5abaf37d Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 11 Oct 2024 16:44:47 -0400 Subject: [PATCH 089/136] fix(NODE-6412): read stale response from previously timed out connection (#4273) --- src/cmap/connection.ts | 5 +- ...lient_side_operations_timeout.spec.test.ts | 6 +++ .../node_csot.test.ts | 46 +++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index a43d6106c7b..a58ef566b7c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -747,9 +747,12 @@ export class Connection extends TypedEventEmitter { } } 
catch (readError) { if (TimeoutError.is(readError)) { - throw new MongoOperationTimeoutError( + const error = new MongoOperationTimeoutError( `Timed out during socket read (${readError.duration}ms)` ); + this.dataEvents = null; + this.onError(error); + throw error; } throw readError; } finally { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index d72e9bc5ebe..c519da8039f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -48,6 +48,12 @@ describe('CSOT spec tests', function () { runUnifiedSuite(specs, (test, configuration) => { const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + configuration.topologyType === 'LoadBalanced' && + test.description === 'timeoutMS is refreshed for close' + ) { + return 'LoadBalanced cannot refresh timeoutMS and run expected killCursors because pinned connection has been closed by the timeout'; + } if ( sessionCSOTTests.includes(test.description) && configuration.topologyType === 'ReplicaSetWithPrimary' && diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b1516454cc7..68d7b16f54d 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1116,4 +1116,50 @@ describe('CSOT driver tests', metadata, () => { ); }); }); + + describe('Connection after timeout', { requires: { mongodb: '>=4.4' } }, function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500 }); + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 700 + } + }; + + await client.db('admin').command(failpoint); + }); + + afterEach(async function () { + await client.close(); + }); + + it('closes so pending messages are not read by another operation', async function () { + const cmap = []; + client.on('connectionCheckedOut', ev => cmap.push(ev)); + client.on('connectionClosed', ev => cmap.push(ev)); + + const error = await client + .db('socket') + .collection('closes') + .insertOne({}) + .catch(error => error); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(cmap).to.have.lengthOf(2); + + const [checkedOut, closed] = cmap; + expect(checkedOut).to.have.property('name', 'connectionCheckedOut'); + expect(closed).to.have.property('name', 'connectionClosed'); + expect(checkedOut).to.have.property('connectionId', closed.connectionId); + }); + }); }); From 7df1a70381d319fcc597a2578871e5131bccaa58 Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Mon, 14 Oct 2024 09:38:50 -0600 Subject: [PATCH 090/136] feat(NODE-6403): add CSOT support to client bulk write (#4261) Co-authored-by: Warren James --- src/cmap/connection.ts | 2 + src/cmap/wire_protocol/on_data.ts | 1 + src/cursor/abstract_cursor.ts | 2 +- src/cursor/client_bulk_write_cursor.ts | 8 +- src/operations/client_bulk_write/executor.ts | 16 +- src/sdam/server.ts | 2 +- src/utils.ts | 13 + ...ient_side_operations_timeout.prose.test.ts | 29 +- .../node_csot.test.ts | 16 +- .../collection_db_management.test.ts | 4 +- 
.../crud/client_bulk_write.test.ts | 384 ++++++++++++++++++ test/tools/runner/config.ts | 28 +- test/tools/utils.ts | 67 +++ 13 files changed, 535 insertions(+), 37 deletions(-) create mode 100644 test/integration/crud/client_bulk_write.test.ts diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index a58ef566b7c..2e2900e40ae 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -716,6 +716,8 @@ export class Connection extends TypedEventEmitter { throw new MongoOperationTimeoutError('Timed out at socket write'); } throw error; + } finally { + timeout.clear(); } } return await drainEvent; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 64c636f41f1..f6732618330 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -116,6 +116,7 @@ export function onData( emitter.off('data', eventHandler); emitter.off('error', errorHandler); finished = true; + timeoutForSocketRead?.clear(); const doneResult = { value: undefined, done: finished } as const; for (const promise of unconsumedPromises) { diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 255a977a5f9..96d28d05584 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -243,7 +243,7 @@ export abstract class AbstractCursor< options.timeoutMode ?? (options.tailable ? CursorTimeoutMode.ITERATION : CursorTimeoutMode.LIFETIME); } else { - if (options.timeoutMode != null) + if (options.timeoutMode != null && options.timeoutContext == null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); } diff --git a/src/cursor/client_bulk_write_cursor.ts b/src/cursor/client_bulk_write_cursor.ts index 3a4e7eb99aa..72c73caad64 100644 --- a/src/cursor/client_bulk_write_cursor.ts +++ b/src/cursor/client_bulk_write_cursor.ts @@ -35,7 +35,7 @@ export class ClientBulkWriteCursor extends AbstractCursor { constructor( client: MongoClient, commandBuilder: ClientBulkWriteCommandBuilder, - options: ClientBulkWriteOptions = {} + options: ClientBulkWriteCursorOptions = {} ) { super(client, new MongoDBNamespace('admin', '$cmd'), options); @@ -72,7 +72,11 @@ export class ClientBulkWriteCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, clientBulkWriteOperation); + const response = await executeOperation( + this.client, + clientBulkWriteOperation, + this.timeoutContext + ); this.cursorResponse = response; return { server: clientBulkWriteOperation.server, session, response }; diff --git a/src/operations/client_bulk_write/executor.ts b/src/operations/client_bulk_write/executor.ts index 93acaac2160..6aac96aa631 100644 --- a/src/operations/client_bulk_write/executor.ts +++ b/src/operations/client_bulk_write/executor.ts @@ -1,3 +1,4 @@ +import { CursorTimeoutContext, CursorTimeoutMode } from '../../cursor/abstract_cursor'; import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor'; import { MongoClientBulkWriteError, @@ -5,6 +6,8 @@ import { MongoServerError } from '../../error'; import { type MongoClient } from '../../mongo_client'; +import { TimeoutContext } from '../../timeout'; +import { resolveTimeoutOptions } from '../../utils'; import { WriteConcern } from '../../write_concern'; import { executeOperation } from '../execute_operation'; import { ClientBulkWriteOperation } from './client_bulk_write'; @@ -70,17 +73,26 @@ export class ClientBulkWriteExecutor { pkFactory ); // Unacknowledged writes need to execute all batches and 
return { ok: 1} + const resolvedOptions = resolveTimeoutOptions(this.client, this.options); + const context = TimeoutContext.create(resolvedOptions); + if (this.options.writeConcern?.w === 0) { while (commandBuilder.hasNextBatch()) { const operation = new ClientBulkWriteOperation(commandBuilder, this.options); - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, context); } return { ok: 1 }; } else { const resultsMerger = new ClientBulkWriteResultsMerger(this.options); // For each command will will create and exhaust a cursor for the results. while (commandBuilder.hasNextBatch()) { - const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options); + const cursorContext = new CursorTimeoutContext(context, Symbol()); + const options = { + ...this.options, + timeoutContext: cursorContext, + ...(resolvedOptions.timeoutMS != null && { timeoutMode: CursorTimeoutMode.LIFETIME }) + }; + const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, options); try { await resultsMerger.merge(cursor); } catch (error) { diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 7ab2d9a043f..35a6f1de695 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -106,7 +106,7 @@ export type ServerEvents = { EventEmitterWithState; /** @internal */ -export type ServerCommandOptions = Omit & { +export type ServerCommandOptions = Omit & { timeoutContext: TimeoutContext; }; diff --git a/src/utils.ts b/src/utils.ts index 04174813c9c..15b3bab90f3 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -35,6 +35,7 @@ import { ServerType } from './sdam/common'; import type { Server } from './sdam/server'; import type { Topology } from './sdam/topology'; import type { ClientSession } from './sessions'; +import { type TimeoutContextOptions } from './timeout'; import { WriteConcern } from './write_concern'; /** @@ -514,6 +515,18 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { return keys.length > 0 && keys[0][0] === '$'; } +export function resolveTimeoutOptions>( + client: MongoClient, + options: T +): T & + Pick< + MongoClient['s']['options'], + 'timeoutMS' | 'serverSelectionTimeoutMS' | 'waitQueueTimeoutMS' | 'socketTimeoutMS' + > { + const { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS } = + client.s.options; + return { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS, ...options }; +} /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 80da92e10a3..458447a437c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -21,7 +21,8 @@ import { promiseWithResolvers, squashError } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { type FailPoint, makeMultiBatchWrite } from '../../tools/utils'; +import { filterForCommands } from '../shared'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -1183,9 +1184,9 @@ describe('CSOT spec prose tests', function () { }); }); - describe.skip( + describe( '11. 
Multi-batch bulkWrites', - { requires: { mongodb: '>=8.0', serverless: 'forbid' } }, + { requires: { mongodb: '>=8.0', serverless: 'forbid', topology: 'single' } }, function () { /** * ### 11. Multi-batch bulkWrites @@ -1245,9 +1246,6 @@ describe('CSOT spec prose tests', function () { } }; - let maxBsonObjectSize: number; - let maxMessageSizeBytes: number; - beforeEach(async function () { await internalClient .db('db') @@ -1256,29 +1254,20 @@ describe('CSOT spec prose tests', function () { .catch(() => null); await internalClient.db('admin').command(failpoint); - const hello = await internalClient.db('admin').command({ hello: 1 }); - maxBsonObjectSize = hello.maxBsonObjectSize; - maxMessageSizeBytes = hello.maxMessageSizeBytes; - client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); }); - it.skip('performs two bulkWrites which fail to complete before 2000 ms', async function () { + it('performs two bulkWrites which fail to complete before 2000 ms', async function () { const writes = []; - client.on('commandStarted', ev => writes.push(ev)); + client.on('commandStarted', filterForCommands('bulkWrite', writes)); - const length = maxMessageSizeBytes / maxBsonObjectSize + 1; - const models = Array.from({ length }, () => ({ - namespace: 'db.coll', - name: 'insertOne' as const, - document: { a: 'b'.repeat(maxBsonObjectSize - 500) } - })); + const models = await makeMultiBatchWrite(this.configuration); const error = await client.bulkWrite(models).catch(error => error); expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError); - expect(writes.map(ev => ev.commandName)).to.deep.equal(['bulkWrite', 'bulkWrite']); - }).skipReason = 'TODO(NODE-6403): client.bulkWrite is implemented in a follow up'; + expect(writes).to.have.lengthOf(2); + }); } ); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 68d7b16f54d..a981a9113df 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -279,12 +279,16 @@ describe('CSOT driver tests', metadata, () => { .stub(Connection.prototype, 'readMany') .callsFake(async function* (...args) { const realIterator = readManyStub.wrappedMethod.call(this, ...args); - const cmd = commandSpy.lastCall.args.at(1); - if ('giveMeWriteErrors' in cmd) { - await realIterator.next().catch(() => null); // dismiss response - yield { parse: () => writeErrorsReply }; - } else { - yield (await realIterator.next()).value; + try { + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + } finally { + realIterator.return(); } }); }); diff --git a/test/integration/collection-management/collection_db_management.test.ts b/test/integration/collection-management/collection_db_management.test.ts index f5c4c55cf05..0cb90b3b592 100644 --- a/test/integration/collection-management/collection_db_management.test.ts +++ b/test/integration/collection-management/collection_db_management.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { Collection, type Db, type MongoClient } from '../../mongodb'; +import { Collection, type Db, type MongoClient, ObjectId } from '../../mongodb'; describe('Collection Management and Db Management', function () { let client: 
MongoClient; @@ -16,7 +16,7 @@ describe('Collection Management and Db Management', function () { }); it('returns a collection object after calling createCollection', async function () { - const collection = await db.createCollection('collection'); + const collection = await db.createCollection(new ObjectId().toHexString()); expect(collection).to.be.instanceOf(Collection); }); diff --git a/test/integration/crud/client_bulk_write.test.ts b/test/integration/crud/client_bulk_write.test.ts new file mode 100644 index 00000000000..6177077b632 --- /dev/null +++ b/test/integration/crud/client_bulk_write.test.ts @@ -0,0 +1,384 @@ +import { expect } from 'chai'; +import { setTimeout } from 'timers/promises'; + +import { + type CommandStartedEvent, + type Connection, + type ConnectionPool, + type MongoClient, + MongoOperationTimeoutError, + now, + TimeoutContext +} from '../../mongodb'; +import { + clearFailPoint, + configureFailPoint, + makeMultiBatchWrite, + makeMultiResponseBatchModelArray +} from '../../tools/utils'; +import { filterForCommands } from '../shared'; + +const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=8.0', + serverless: 'forbid' + } +}; + +describe('Client Bulk Write', function () { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + await clearFailPoint(this.configuration); + }); + + describe('CSOT enabled', function () { + describe('when timeoutMS is set on the client', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 300 }); + await client.connect(); + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite([ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ]) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on the bulkWrite operation', function () { + beforeEach(async function () { + client = this.configuration.newClient({}); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on both the client and operation options', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('bulk write options take precedence over the client 
options', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe( + 'unacknowledged writes', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + function () { + let connection: Connection; + let pool: ConnectionPool; + + beforeEach(async function () { + client = this.configuration.newClient({}, { maxPoolSize: 1, waitQueueTimeoutMS: 2000 }); + + await client.connect(); + + pool = Array.from(client.topology.s.servers.values())[0].pool; + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + }); + + afterEach(async function () { + pool = Array.from(client.topology.s.servers.values())[0].pool; + pool.checkIn(connection); + await client.close(); + }); + + it('a single batch bulk write does not take longer than timeoutMS', async function () { + const start = now(); + let end; + const timeoutError = client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 200, writeConcern: { w: 0 } } + ) + .catch(e => e) + .then(e => { + end = now(); + return e; + }); + + await setTimeout(250); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(200 - 100, 200 + 100); + }); + + it( + 'timeoutMS applies to all batches', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + let end; + const timeoutError = client + .bulkWrite(models, { + timeoutMS: 400, + writeConcern: { w: 0 } + }) + .catch(e => e) + .then(r => { + end = now(); + return r; + }); + + await setTimeout(210); + + pool.checkIn(connection); + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + + await setTimeout(210); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(400 - 100, 400 + 100); + } + ); + } + ); + + describe('acknowledged writes', metadata, function () { + describe('when a bulk write command times out', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('the operation times out', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when the timeout is reached while iterating the result cursor', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { 
monitorCommands: true, minPoolSize: 5 }); + client.on('commandStarted', filterForCommands(['getMore'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1400, failCommands: ['getMore'] } + }); + }); + + it('the bulk write operation times out', metadata, async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + verboseResults: true, + timeoutMS: 1500 + }) + .catch(e => e); + + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + // DRIVERS-3005 - killCursors causes cursor cleanup to extend past timeoutMS. + // The amount of time killCursors takes is wildly variable and can take up to almost + // 600-700ms sometimes. + expect(end - start).to.be.within(1500, 1500 + 800); + expect(commands).to.have.lengthOf(1); + }); + }); + + describe('if the cursor encounters an error and a killCursors is sent', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands(['killCursors'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + blockConnection: true, + blockTimeMS: 3000, + failCommands: ['getMore', 'killCursors'] + } + }); + }); + + it( + 'timeoutMS is refreshed to the timeoutMS passed to the bulk write for the killCursors command', + metadata, + async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const timeoutError = await client + .bulkWrite(models, { ordered: true, timeoutMS: 2800, verboseResults: true }) + .catch(e => e); + + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + expect(maxTimeMS).to.be.greaterThan(1000); + } + ); + }); + + describe('when the bulk write is executed in multiple batches', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands('bulkWrite', commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { blockConnection: true, blockTimeMS: 1010, failCommands: ['bulkWrite'] } + }); + }); + + it( + 'timeoutMS applies to the duration of all batches', + { + requires: { + ...metadata.requires, + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + timeoutMS: 2000 + }) + .catch(e => e); + + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(2000 - 100, 2000 + 100); + expect(commands.length, 'Test must execute two batches.').to.equal(2); + } + ); + }); + }); + }); +}); diff --git a/test/tools/runner/config.ts b/test/tools/runner/config.ts index 1d637486226..16024638fba 100644 --- a/test/tools/runner/config.ts +++ b/test/tools/runner/config.ts @@ -7,6 +7,7 @@ import { type AuthMechanism, HostAddress, MongoClient, + type 
MongoClientOptions, type ServerApi, TopologyType, type WriteConcernSettings @@ -82,7 +83,7 @@ export class TestConfiguration { auth?: { username: string; password: string; authSource?: string }; proxyURIParams?: ProxyParams; }; - serverApi: ServerApi; + serverApi?: ServerApi; activeResources: number; isSrv: boolean; serverlessCredentials: { username: string | undefined; password: string | undefined }; @@ -171,13 +172,34 @@ export class TestConfiguration { return this.options.replicaSet; } + /** + * Returns a `hello`, executed against `uri`. + */ + async hello(uri = this.uri) { + const client = this.newClient(uri); + try { + await client.connect(); + const { maxBsonObjectSize, maxMessageSizeBytes, maxWriteBatchSize, ...rest } = await client + .db('admin') + .command({ hello: 1 }); + return { + maxBsonObjectSize, + maxMessageSizeBytes, + maxWriteBatchSize, + ...rest + }; + } finally { + await client.close(); + } + } + isOIDC(uri: string, env: string): boolean { if (!uri) return false; return uri.indexOf('MONGODB-OIDC') > -1 && uri.indexOf(`ENVIRONMENT:${env}`) > -1; } - newClient(urlOrQueryOptions?: string | Record, serverOptions?: Record) { - serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); + newClient(urlOrQueryOptions?: string | Record, serverOptions?: MongoClientOptions) { + serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); // Support MongoClient constructor form (url, options) for `newClient`. if (typeof urlOrQueryOptions === 'string') { diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 8614bd7d64c..8ebc5e8f532 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -11,6 +11,7 @@ import { setTimeout } from 'timers'; import { inspect, promisify } from 'util'; import { + type AnyClientBulkWriteModel, type Document, type HostAddress, MongoClient, @@ -18,6 +19,7 @@ import { Topology, type TopologyOptions } from '../mongodb'; +import { type TestConfiguration } from './runner/config'; import { runUnifiedSuite } from './unified-spec-runner/runner'; import { type CollectionData, @@ -598,3 +600,68 @@ export async function waitUntilPoolsFilled( await Promise.all([wait$(), client.connect()]); } + +export async function configureFailPoint(configuration: TestConfiguration, failPoint: FailPoint) { + const utilClient = configuration.newClient(); + await utilClient.connect(); + + try { + await utilClient.db('admin').command(failPoint); + } finally { + await utilClient.close(); + } +} + +export async function clearFailPoint(configuration: TestConfiguration) { + const utilClient = configuration.newClient(); + await utilClient.connect(); + + try { + await utilClient.db('admin').command({ + configureFailPoint: 'failCommand', + mode: 'off' + }); + } finally { + await utilClient.close(); + } +} + +export async function makeMultiBatchWrite( + configuration: TestConfiguration +): Promise { + const { maxBsonObjectSize, maxMessageSizeBytes } = await configuration.hello(); + + const length = maxMessageSizeBytes / maxBsonObjectSize + 1; + const models = Array.from({ length }, () => ({ + namespace: 'db.coll', + name: 'insertOne' as const, + document: { a: 'b'.repeat(maxBsonObjectSize - 500) } + })); + + return models; +} + +export async function makeMultiResponseBatchModelArray( + configuration: TestConfiguration +): Promise { + const { maxBsonObjectSize } = await configuration.hello(); + const namespace = `foo.${new BSON.ObjectId().toHexString()}`; + const models: AnyClientBulkWriteModel[] = [ + { + name: 'updateOne', + namespace, + update: { 
$set: { age: 1 } }, + upsert: true, + filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) } + }, + { + name: 'updateOne', + namespace, + update: { $set: { age: 1 } }, + upsert: true, + filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) } + } + ]; + + return models; +} From 6330fd65128eb4eb6db3a3d47edf40575067d95e Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 11 Apr 2024 17:15:41 -0400 Subject: [PATCH 091/136] feat(NODE-6090): Implement CSOT logic for connection checkout and server selection --- src/admin.ts | 3 +- src/cmap/connection.ts | 4 + src/cmap/connection_pool.ts | 53 ++- src/collection.ts | 5 + src/db.ts | 6 + src/error.ts | 9 + src/index.ts | 1 + src/operations/command.ts | 2 + src/operations/find.ts | 3 +- src/operations/operation.ts | 8 + src/operations/run_command.ts | 9 +- src/sdam/server.ts | 3 +- src/sdam/topology.ts | 54 ++- src/timeout.ts | 14 + src/utils.ts | 10 + ...ient_side_operations_timeout.prose.test.ts | 315 +++++++++++++----- ...lient_side_operations_timeout.unit.test.ts | 140 +++++--- .../node_csot.test.ts | 75 ++++- test/unit/cmap/connection_pool.test.js | 33 +- test/unit/index.test.ts | 1 + 20 files changed, 570 insertions(+), 178 deletions(-) diff --git a/src/admin.ts b/src/admin.ts index a71ac4be1dc..e030384eafc 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -78,7 +78,8 @@ export class Admin { new RunAdminCommandOperation(command, { ...resolveBSONOptions(options), session: options?.session, - readPreference: options?.readPreference + readPreference: options?.readPreference, + timeoutMS: options?.timeoutMS ?? this.s.db.timeoutMS }) ); } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 0837c54d3fa..507b95b0b98 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,6 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; +import { type Timeout } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -94,6 +95,9 @@ export interface CommandOptions extends BSONSerializeOptions { writeConcern?: WriteConcern; directConnection?: boolean; + + /** @internal */ + timeout?: Timeout; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5a858a5121e..79440db1e06 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -21,13 +21,14 @@ import { MongoInvalidArgumentError, MongoMissingCredentialsError, MongoNetworkError, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; +import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -102,7 +103,6 @@ export interface ConnectionPoolOptions extends Omit void; reject: (err: AnyError) => void; - timeout: Timeout; [kCancelled]?: boolean; checkoutTime: number; } @@ -355,37 +355,57 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. 
This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. */ - async checkOut(): Promise { - const checkoutTime = now(); + async checkOut(options?: { timeout?: Timeout }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; + const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; const { promise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(waitQueueTimeoutMS); + let timeout: Timeout | null = null; + if (options?.timeout) { + // CSOT enabled + // Determine if we're using the timeout passed in or a new timeout + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + // This check determines whether or not Topology.selectServer used the configured + // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + // server selection used `timeoutMS`, so we should use the existing timeout as the timeout + // here + timeout = options.timeout; + } else { + // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with + // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut + // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking + timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); + } + } + } else { + timeout = Timeout.expires(waitQueueTimeoutMS); + } const waitQueueMember: WaitQueueMember = { resolve, - reject, - timeout, - checkoutTime + reject }; this[kWaitQueue].push(waitQueueMember); process.nextTick(() => this.processWaitQueue()); try { - return await Promise.race([promise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { waitQueueMember[kCancelled] = true; - waitQueueMember.timeout.clear(); - this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime) @@ -396,9 +416,16 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); + if (options?.timeout) { + throw new MongoOperationTimeoutError('Timed out during connection checkout', { + cause: timeoutError + }); + } throw timeoutError; } throw error; + } finally { + if (timeout !== options?.timeout) timeout?.clear(); } } @@ -764,7 +791,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.reject(error); continue; @@ -785,7 +811,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECKED_OUT, new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.resolve(connection); @@ -828,8 +853,6 @@ export class ConnectionPool extends TypedEventEmitter { ); waitQueueMember.resolve(connection); } - - waitQueueMember.timeout.clear(); } process.nextTick(() => this.processWaitQueue()); }); diff --git a/src/collection.ts b/src/collection.ts index ccc6fe2da65..dbd91371cce 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -262,6 +262,11 @@ export class Collection { this.s.collectionHint = normalizeHintField(v); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options.timeoutMS; + } + /** * Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field, * one will be added to each of the documents missing it by the driver, mutating the document. This behavior diff --git a/src/db.ts b/src/db.ts index 53c18e44af6..6e1aa194acf 100644 --- a/src/db.ts +++ b/src/db.ts @@ -222,6 +222,11 @@ export class Db { return this.s.namespace.toString(); } + /** @internal */ + get timeoutMS(): number | undefined { + return this.s.options?.timeoutMS; + } + /** * Create a new collection on a server with the specified options. Use this to create capped collections. 
* More information about command options available at https://www.mongodb.com/docs/manual/reference/command/create/ @@ -272,6 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/error.ts b/src/error.ts index a9178389486..f0441426feb 100644 --- a/src/error.ts +++ b/src/error.ts @@ -857,6 +857,15 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } } +/** + * @internal + */ +export class MongoOperationTimeoutError extends MongoRuntimeError { + override get name(): string { + return 'MongoOperationTimeoutError'; + } +} + /** * An error thrown when the user attempts to add options to a cursor that has already been * initialized diff --git a/src/index.ts b/src/index.ts index 9538ce1d5cc..13df3e8c437 100644 --- a/src/index.ts +++ b/src/index.ts @@ -66,6 +66,7 @@ export { MongoNetworkTimeoutError, MongoNotConnectedError, MongoOIDCError, + MongoOperationTimeoutError, MongoParseError, MongoRuntimeError, MongoServerClosedError, diff --git a/src/operations/command.ts b/src/operations/command.ts index 94ccc6ceafe..c64b4ae963a 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -65,6 +65,7 @@ export interface OperationParent { writeConcern?: WriteConcern; readPreference?: ReadPreference; bsonOptions?: BSONSerializeOptions; + timeoutMS?: number; } /** @internal */ @@ -131,6 +132,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, + timeout: this.timeout, readPreference: this.readPreference, session }; diff --git a/src/operations/find.ts b/src/operations/find.ts index 55abe00a923..d34d99cc745 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -123,7 +123,8 @@ export class FindOperation extends CommandOperation { ...this.options, ...this.bsonOptions, documentsReturnedIn: 'firstBatch', - session + session, + timeout: this.timeout }, this.explain ? ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 12f168b76e3..e08d25bfec0 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,6 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type Timeout } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -57,6 +58,11 @@ export abstract class AbstractOperation { options: OperationOptions; + /** @internal */ + timeout?: Timeout; + /** @internal */ + timeoutMS?: number; + [kSession]: ClientSession | undefined; static aspects?: Set; @@ -74,6 +80,8 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; + + this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index ad7d02c044f..56462fa8843 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -14,6 +14,8 @@ export type RunCommandOptions = { session?: ClientSession; /** The read preference */ readPreference?: ReadPreferenceLike; + /** @internal */ + timeoutMS?: number; } & BSONSerializeOptions; /** @internal */ @@ -39,10 +41,12 @@ export class RunCommandOperation extends AbstractOperation { { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }, this.options.responseType ); + return res; } } @@ -68,7 +72,8 @@ export class RunAdminCommandOperation extends AbstractOperation const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, - session + session, + timeout: this.timeout }); return res; } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 4c1d37519ad..3d2a3ca1a31 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,7 +311,7 @@ export class Server extends TypedEventEmitter { this.incrementOperationCount(); if (conn == null) { try { - conn = await this.pool.checkOut(); + conn = await this.pool.checkOut(options); if (this.loadBalanced && isPinnableCommand(cmd, session)) { session?.pin(conn); } @@ -336,6 +336,7 @@ export class Server extends TypedEventEmitter { operationError.code === MONGODB_ERROR_CODES.Reauthenticate ) { await this.pool.reauthenticate(conn); + // TODO(NODE-5682): Implement CSOT support for socket read/write at the connection layer try { const res = await conn.command(ns, cmd, finalOptions, responseType); throwIfWriteConcernError(res); diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 73b0e92a09a..4c9d71d807d 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -24,6 +24,7 @@ import { type MongoDriverError, MongoError, MongoErrorLabel, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerSelectionError, MongoTopologyClosedError @@ -37,6 +38,7 @@ import { Timeout, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, + csotMin, type EventEmitterWithState, HostAddress, List, @@ -107,7 +109,6 @@ export interface ServerSelectionRequest { resolve: (server: Server) => void; reject: (error: MongoError) => void; [kCancelled]?: boolean; - timeout: Timeout; operationName: string; waitingLogged: boolean; previousServer?: ServerDescription; @@ -457,8 +458,14 @@ export class Topology extends TypedEventEmitter { } } + const timeoutMS = this.client.options.timeoutMS; + const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const selectServerOptions = { operationName: 'ping', ...options }; + const selectServerOptions = { + operationName: 'ping', + timeout, + ...options + }; try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), @@ -467,7 +474,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, {}); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -556,6 +563,25 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } + const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 0; + let timeout: Timeout | null; + if (options.timeout) { + // CSOT Enabled + if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { + if ( + options.timeout.duration === serverSelectionTimeoutMS || + csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS + ) { + timeout = options.timeout; + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } + } else { + timeout = null; + } + } else { + timeout = Timeout.expires(serverSelectionTimeoutMS); + } const isSharded = this.description.type === TopologyType.Sharded; const session = options.session; @@ -578,11 +604,12 @@ export class Topology extends TypedEventEmitter { ) ); } + if (timeout !== options.timeout) timeout?.clear(); return transaction.server; } const { promise: serverPromise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); + const waitQueueMember: ServerSelectionRequest = { serverSelector, topologyDescription: this.description, @@ -590,7 +617,6 @@ export class Topology extends TypedEventEmitter { transaction, resolve, reject, - timeout, startTime: now(), operationName: options.operationName, waitingLogged: false, @@ -601,14 +627,14 @@ export class Topology extends TypedEventEmitter { processWaitQueue(this); try { - return await Promise.race([serverPromise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? 
Promise.race([serverPromise, timeout]) : serverPromise); } catch (error) { if (TimeoutError.is(error)) { // Timeout waitQueueMember[kCancelled] = true; - timeout.clear(); const timeoutError = new MongoServerSelectionError( - `Server selection timed out after ${options.serverSelectionTimeoutMS} ms`, + `Server selection timed out after ${timeout?.duration} ms`, this.description ); if ( @@ -628,10 +654,17 @@ export class Topology extends TypedEventEmitter { ); } + if (options.timeout) { + throw new MongoOperationTimeoutError('Timed out during server selection', { + cause: timeoutError + }); + } throw timeoutError; } // Other server selection error throw error; + } finally { + if (timeout !== options.timeout) timeout?.clear(); } } /** @@ -889,8 +922,6 @@ function drainWaitQueue(queue: List, drainError: MongoDr continue; } - waitQueueMember.timeout.clear(); - if (!waitQueueMember[kCancelled]) { if ( waitQueueMember.mongoLogger?.willLog( @@ -944,7 +975,6 @@ function processWaitQueue(topology: Topology) { ) : serverDescriptions; } catch (selectorError) { - waitQueueMember.timeout.clear(); if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, @@ -1032,8 +1062,6 @@ function processWaitQueue(topology: Topology) { transaction.pinServer(selectedServer); } - waitQueueMember.timeout.clear(); - if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, diff --git a/src/timeout.ts b/src/timeout.ts index cd48ec385da..7af1a23f261 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -40,6 +40,16 @@ export class Timeout extends Promise { public duration: number; public timedOut = false; + get remainingTime(): number { + if (this.timedOut) return 0; + if (this.duration === 0) return Infinity; + return this.start + this.duration - Math.trunc(performance.now()); + } + + get timeElapsed(): number { + return Math.trunc(performance.now()) - this.start; + } + /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = false) { let reject!: Reject; @@ -78,6 +88,10 @@ export class Timeout extends Promise { this.id = undefined; } + throwIfExpired(): void { + if (this.timedOut) throw new TimeoutError('Timed out'); + } + public static expires(durationMS: number, unref?: boolean): Timeout { return new Timeout(undefined, durationMS, unref); } diff --git a/src/utils.ts b/src/utils.ts index 5ad754c9321..ebc0784cb1f 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -544,6 +544,10 @@ export function resolveOptions( result.readPreference = readPreference; } + const timeoutMS = options?.timeoutMS; + + result.timeoutMS = timeoutMS ?? 
parent?.timeoutMS; + return result; } @@ -1379,6 +1383,12 @@ export async function fileIsAccessible(fileName: string, mode?: number) { } } +export function csotMin(duration1: number, duration2: number): number { + if (duration1 === 0) return duration2; + if (duration2 === 0) return duration1; + return Math.min(duration1, duration2); +} + export function noop() { return; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1ed88f34d86..903ea9c3bb4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,8 +1,30 @@ /* Specification prose tests */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { + MongoClient, + MongoOperationTimeoutError, + MongoServerSelectionError, + now +} from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec prose tests', () => { - context('1. Multi-batch writes', () => { +describe('CSOT spec prose tests', function () { + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + }); + + afterEach(async function () { + await internalClient?.close(); + await client?.close(); + }); + + context.skip('1. Multi-batch writes', () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -31,7 +53,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('2. maxTimeMS is not set for commands sent to mongocryptd', () => { + context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { /** * This test MUST only be run against enterprise server versions 4.2 and higher. * @@ -42,7 +64,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('3. ClientEncryption', () => { + context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, * `LOCAL_MASTERKEY` refers to the following base64: @@ -132,7 +154,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('4. Background Connection Pooling', () => { + context.skip('4. Background Connection Pooling', () => { /** * The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication * fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait @@ -192,7 +214,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('5. Blocking Iteration Methods', () => { + context.skip('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -251,7 +273,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('6. GridFS - Upload', () => { + context.skip('6. GridFS - Upload', () => { /** Tests in this section MUST only be run against server versions 4.4 and higher. 
*/ context('uploads via openUploadStream can be timed out', () => { @@ -306,7 +328,7 @@ describe.skip('CSOT spec prose tests', () => { }); }); - context('7. GridFS - Download', () => { + context.skip('7. GridFS - Download', () => { /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -351,96 +373,225 @@ describe.skip('CSOT spec prose tests', () => { }); context('8. Server Selection', () => { - context('serverSelectionTimeoutMS honored if timeoutMS is not set', () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. - * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. - */ - }); + context('using sinon timer', function () { + let clock: sinon.SinonFakeTimers; + + beforeEach(function () { + clock = sinon.useFakeTimers(); + }); + + afterEach(function () { + clock.restore(); + }); - context( - "timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", - () => { + it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. + * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ - } - ); - context( - "serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", - () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. + /** NOTE: This is the original implementation of this test, but it was flaky, so was + * replaced by the current implementation using sinon fake timers + * ```ts + * client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + * const admin = client.db('test').admin(); + * const start = performance.now(); + * const maybeError = await admin.ping().then( + * () => null, + * e => e + * ); + * const end = performance.now(); + * + * expect(maybeError).to.be.instanceof(MongoServerSelectionError); + * expect(end - start).to.be.lte(15) + * ``` */ - } - ); + client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + const admin = client.db('test').admin(); + const maybeError = admin.ping().then( + () => null, + e => e + ); + + await clock.tickAsync(11); + expect(await maybeError).to.be.instanceof(MongoServerSelectionError); + }); + }); + + it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. 
+ */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); + + it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }); - context('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', () => { + it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ + client = new MongoClient('mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); }); - context( - "timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. 
+ * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); - context( - "serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 20, + timeoutMS: 10 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + it.skip("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + * - Expect this to fail with a timeout error after no more than 15ms. + */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); + + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 10, + timeoutMS: 20 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context('9. endSession', () => { + context.skip('9. endSession', () => { /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -472,7 +623,7 @@ describe.skip('CSOT spec prose tests', () => { */ }); - context('10. 
Convenient Transactions', () => { + context.skip('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index cf9c5f736ff..c1426d8db1d 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -1,51 +1,105 @@ -/* eslint-disable @typescript-eslint/no-empty-function */ /** * The following tests are described in CSOTs spec prose tests as "unit" tests * The tests enumerated in this section could not be expressed in either spec or prose format. * Drivers SHOULD implement these if it is possible to do so using the driver's existing test infrastructure. */ +import { expect } from 'chai'; +import * as sinon from 'sinon'; + +import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; + // TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec unit tests', () => { - context('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', () => {}); - - context( - 'If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', - () => {} - ); - - context( - 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', - () => {} - ); - - context( - 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - - context( - 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', - () => {} - ); +describe('CSOT spec unit tests', function () { + let client: MongoClient; + + afterEach(async function () { + sinon.restore(); + await client?.close(); + }); + + context('Server Selection and Connection Checkout', function () { + it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); + sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(Timeout.expires).to.have.been.calledWith(10000); + expect(Timeout.expires).to.not.have.been.calledWith(999999); + }); + + it('If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ timeoutMS: 
1000 }); + // Spy on connection checkout and pull options argument + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(checkoutSpy).to.have.been.calledOnce; + expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + // Check that we passed through the timeout + expect(checkoutSpy.firstCall.args[0].timeout).to.equal( + selectServerSpy.lastCall.lastArg.timeout + ); + + // Check that no more Timeouts are constructed after we enter checkout + expect(!expiresSpy.calledAfter(checkoutSpy)); + }); + + it('If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 123456 }); + + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + expect(checkoutSpy).to.have.been.calledAfter(selectServerSpy); + + expect(expiresSpy).to.have.been.calledWith(123456); + }); + + /* eslint-disable @typescript-eslint/no-empty-function */ + context.skip( + 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', + () => {} + ).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + context( + 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', + () => {} + ); + }); + + context.skip('Socket timeouts', function () { + context( + 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', + () => {} + ); + }).skipReason = + 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; + + context.skip('Client side encryption', function () { + context( + 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', + () => {} + ); + + context( + 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', + () => {} + ); + }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + + context.skip('Background Connection Pooling', function () { + context( + 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', + () => {} + ); + }).skipReason = 'TODO(NODE-6091): Implement CSOT logic for Background Connection Pooling'; + /* eslint-enable @typescript-eslint/no-empty-function */ }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b6a936afbb9..5636eb00db7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -7,7 +7,9 @@ import { type Collection, type Db, type FindCursor, - type MongoClient + LEGACY_HELLO_COMMAND, + type MongoClient, + MongoOperationTimeoutError } from '../../mongodb'; describe('CSOT driver tests', () => { @@ -94,4 +96,75 @@ 
describe('CSOT driver tests', () => { }); }); }); + + describe('autoconnect', () => { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + client = undefined; + }); + + describe('when failing autoconnect with timeoutMS defined', () => { + let configClient: MongoClient; + + beforeEach(async function () { + configClient = this.configuration.newClient(); + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + }); + + afterEach(async function () { + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + await configClient.close(); + }); + + it('throws a MongoOperationTimeoutError', { + metadata: { requires: { mongodb: '>=4.4' } }, + test: async function () { + const commandsStarted = []; + client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + + client.on('commandStarted', ev => commandsStarted.push(ev)); + + const maybeError = await client + .db('test') + .collection('test') + .insertOne({ a: 19 }) + .then( + () => null, + e => e + ); + + expect(maybeError).to.exist; + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + + expect(commandsStarted).to.have.length(0); // Ensure that we fail before we start the insertOne + } + }); + }); + }); }); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 69102e1f150..18048befab4 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -5,7 +5,7 @@ const { WaitQueueTimeoutError } = require('../../mongodb'); const mock = require('../../tools/mongodb-mock/index'); const sinon = require('sinon'); const { expect } = require('chai'); -const { setImmediate } = require('timers'); +const { setImmediate } = require('timers/promises'); const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); @@ -26,6 +26,9 @@ describe('Connection Pool', function () { options: { extendedMetadata: {} } + }, + s: { + serverSelectionTimeoutMS: 0 } } }; @@ -98,7 +101,7 @@ describe('Connection Pool', function () { pool.checkIn(conn); }); - it('should clear timed out wait queue members if no connections are available', function (done) { + it('should clear timed out wait queue members if no connections are available', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; if (isHello(doc)) { @@ -114,23 +117,15 @@ describe('Connection Pool', function () { pool.ready(); - pool.checkOut().then(conn => { - expect(conn).to.exist; - pool.checkOut().then(expect.fail, err => { - expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); - - // We can only process the wait queue with `checkIn` and `checkOut`, so we - // force the pool here to think there are no available connections, even though - // we are checking the connection back in. 
This simulates a slow leak where - // incoming requests outpace the ability of the queue to fully process cancelled - // wait queue members - sinon.stub(pool, 'availableConnectionCount').get(() => 0); - pool.checkIn(conn); - - setImmediate(() => expect(pool).property('waitQueueSize').to.equal(0)); - done(); - }); - }, expect.fail); + const conn = await pool.checkOut(); + const err = await pool.checkOut().catch(e => e); + expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); + sinon.stub(pool, 'availableConnectionCount').get(() => 0); + pool.checkIn(conn); + + await setImmediate(); + + expect(pool).property('waitQueueSize').to.equal(0); }); describe('minPoolSize population', function () { diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index 595f372c43d..a1e8f22e37d 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -110,6 +110,7 @@ const EXPECTED_EXPORTS = [ 'MongoTailableCursorError', 'MongoTopologyClosedError', 'MongoTransactionError', + 'MongoOperationTimeoutError', 'MongoUnexpectedServerResponseError', 'MongoWriteConcernError', 'WriteConcernErrorResult', From a1206a0196b603c0fb0a30908b4fbd0094ea6ca8 Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 10 Jun 2024 10:46:02 -0400 Subject: [PATCH 092/136] test(NODE-6120): Implement Unified test runner changes for CSOT (#4121) --- test/spec/unified-test-format/Makefile | 37 +++++- .../collectionData-createOptions.yml | 7 +- .../valid-pass/createEntities-operation.json | 74 ++++++++++++ .../valid-pass/createEntities-operation.yml | 38 ++++++ .../valid-pass/entity-cursor-iterateOnce.json | 111 ++++++++++++++++++ .../valid-pass/entity-cursor-iterateOnce.yml | 59 ++++++++++ .../valid-pass/entity-find-cursor.json | 15 ++- .../valid-pass/entity-find-cursor.yml | 6 +- ...ectedEventsForClient-ignoreExtraEvents.yml | 2 +- .../valid-pass/matches-lte-operator.json | 78 ++++++++++++ .../valid-pass/matches-lte-operator.yml | 41 +++++++ .../valid-pass/poc-change-streams.json | 36 ++++++ .../valid-pass/poc-change-streams.yml | 18 +++ .../valid-pass/poc-crud.json | 2 +- .../valid-pass/poc-crud.yml | 2 +- .../valid-pass/poc-sessions.json | 2 +- .../valid-pass/poc-sessions.yml | 3 +- .../poc-transactions-convenient-api.json | 2 +- .../poc-transactions-convenient-api.yml | 2 +- .../poc-transactions-mongos-pin-auto.json | 2 +- .../poc-transactions-mongos-pin-auto.yml | 2 +- .../valid-pass/poc-transactions.json | 6 +- .../valid-pass/poc-transactions.yml | 6 +- test/tools/unified-spec-runner/match.ts | 32 ++++- test/tools/unified-spec-runner/schema.ts | 1 + 25 files changed, 547 insertions(+), 37 deletions(-) create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.json create mode 100644 test/spec/unified-test-format/valid-pass/createEntities-operation.yml create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json create mode 100644 test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.json create mode 100644 test/spec/unified-test-format/valid-pass/matches-lte-operator.yml diff --git a/test/spec/unified-test-format/Makefile b/test/spec/unified-test-format/Makefile index 9711d9eee0e..a2b79e3f70b 100644 --- a/test/spec/unified-test-format/Makefile +++ b/test/spec/unified-test-format/Makefile @@ -1,8 +1,8 @@ -SCHEMA=../schema-1.5.json +SCHEMA=../schema-1.21.json -.PHONY: all invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud 
collection-management sessions command-monitoring HAS_AJV +.PHONY: all invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout HAS_AJV -all: invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud collection-management sessions command-monitoring +all: invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api change-streams crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout client-side-encryption invalid: HAS_AJV @# Redirect stdout to hide expected validation errors @@ -14,6 +14,9 @@ valid-fail: HAS_AJV valid-pass: HAS_AJV @ajv test -s $(SCHEMA) -d "valid-pass/*.yml" --valid +atlas-data-lake: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../atlas-data-lake-testing/tests/unified/*.yml" --valid + versioned-api: HAS_AJV @ajv test -s $(SCHEMA) -d "../../versioned-api/tests/*.yml" --valid @@ -26,17 +29,39 @@ gridfs: HAS_AJV transactions: HAS_AJV @ajv test -s $(SCHEMA) -d "../../transactions/tests/unified/*.yml" --valid +transactions-convenient-api: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../transactions-convenient-api/tests/unified/*.yml" --valid + +change-streams: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../change-streams/tests/unified/*.yml" --valid + +client-side-operations-timeout: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-operations-timeout/tests/*.yml" --valid + crud: HAS_AJV @ajv test -s $(SCHEMA) -d "../../crud/tests/unified/*.yml" --valid collection-management: HAS_AJV @ajv test -s $(SCHEMA) -d "../../collection-management/tests/*.yml" --valid +read-write-concern: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../read-write-concern/tests/operation/*.yml" --valid + +retryable-reads: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-reads/tests/unified/*.yml" --valid + +retryable-writes: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-writes/tests/unified/*.yml" --valid + sessions: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../sessions/tests/unified/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../sessions/tests/*.yml" --valid + +command-logging-and-monitoring: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/logging/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/monitoring/*.yml" --valid -command-monitoring: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../command-monitoring/tests/unified/*.yml" --valid +client-side-encryption: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-encryption/tests/unified/*.yml" --valid HAS_AJV: @if ! command -v ajv > /dev/null; then \ diff --git a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml index 3b1c0c3a412..c6afedcfa96 100644 --- a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml +++ b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml @@ -1,12 +1,9 @@ description: collectionData-createOptions - schemaVersion: "1.9" - runOnRequirements: - minServerVersion: "3.6" # Capped collections cannot be created on serverless instances. 
serverless: forbid - createEntities: - client: id: &client0 client0 @@ -18,7 +15,6 @@ createEntities: id: &collection0 collection0 database: *database0 collectionName: &collection0Name coll0 - initialData: - collectionName: *collection0Name databaseName: *database0Name @@ -28,7 +24,6 @@ initialData: size: &cappedSize 4096 documents: - { _id: 1, x: 11 } - tests: - description: collection is created with the correct options operations: @@ -39,4 +34,4 @@ tests: - $collStats: { storageStats: {} } - $project: { capped: '$storageStats.capped', maxSize: '$storageStats.maxSize'} expectResult: - - { capped: true, maxSize: *cappedSize } + - { capped: true, maxSize: *cappedSize } \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.json b/test/spec/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 00000000000..3fde42919d7 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.yml b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml new file mode 100644 index 00000000000..ee8acd73687 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml @@ -0,0 +1,38 @@ +description: createEntities-operation + +# Note: createEntities is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +tests: + - description: createEntities operation + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client1 client1 + observeEvents: [ commandStartedEvent ] + - database: + id: &database1 database1 + client: *client1 + databaseName: &database1Name database1 + - collection: + id: &collection1 collection1 + database: *database1 + collectionName: &collection1Name coll1 + - name: deleteOne + object: *collection1 + arguments: + filter: { _id : 1 } + expectEvents: + - client: *client1 + events: + - commandStartedEvent: + command: + delete: *collection1Name + deletes: + - q: { _id: 1 } + limit: 1 + commandName: delete + databaseName: *database1Name diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 00000000000..b17ae78b942 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,111 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml new file mode 100644 index 00000000000..508e594a538 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml @@ -0,0 +1,59 @@ +description: entity-cursor-iterateOnce + +# Note: iterateOnce is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - _id: 1 + - _id: 2 + - _id: 3 + +tests: + - description: iterateOnce + operations: + - name: createFindCursor + object: *collection0 + arguments: + filter: {} + batchSize: 2 + saveResultAsEntity: &cursor0 cursor0 + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 1 } + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 2 } + # This operation could be iterateUntilDocumentOrError, but we use iterateOne to ensure that drivers support it. + - name: iterateOnce + object: *cursor0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: {} + batchSize: 2 + commandName: find + databaseName: *database0Name + - commandStartedEvent: + command: + getMore: { $$type: [ int, long ] } + collection: *collection0Name + commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f3..6f955d81f4a 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json @@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml index 61c9f8835ac..3ecdf6da1df 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml @@ -61,19 +61,19 @@ tests: - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } firstBatch: { $$type: array } commandName: find - commandStartedEvent: command: - getMore: { $$type: long } + getMore: { $$type: [ int, long ] } collection: *collection0Name commandName: getMore - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } nextBatch: { $$type: array } commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml index 162d0e3c046..d6d87094f64 100644 --- a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml +++ b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml @@ -75,4 +75,4 @@ tests: insert: *collection0Name documents: - *insertDocument4 - commandName: insert + commandName: insert \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.json 
b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 00000000000..4de65c58387 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml new file mode 100644 index 00000000000..4bec571f029 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml @@ -0,0 +1,41 @@ +description: matches-lte-operator + +# Note: $$lte is not technically in the 1.8 schema but was introduced at the same time. +schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0Name + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +tests: + - description: special lte matching operator + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id : 1, y: 1 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection0Name + documents: + # We can make exact assertions here but we use the $$lte operator to ensure drivers support it. 
+ - { _id: { $$lte: 1 }, y: { $$lte: 2 } } + commandName: insert + databaseName: *database0Name diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.json b/test/spec/unified-test-format/valid-pass/poc-change-streams.json index 4194005eb41..50f0d06f08d 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.json @@ -94,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml index b066cf0b89a..a7daafceb77 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml @@ -59,6 +59,24 @@ initialData: documents: [] tests: + - description: "saveResultAsEntity is optional for createChangeStream" + runOnRequirements: + - minServerVersion: "3.8.0" + topologies: [ replicaset ] + operations: + - name: createChangeStream + object: *client0 + arguments: + pipeline: [] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + aggregate: 1 + commandName: aggregate + databaseName: admin + - description: "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster." 
runOnRequirements: - minServerVersion: "3.8.0" diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.json b/test/spec/unified-test-format/valid-pass/poc-crud.json index 0790d9b789f..94e4ec56829 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.json +++ b/test/spec/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.yml b/test/spec/unified-test-format/valid-pass/poc-crud.yml index b7d05d75afb..5748c0779f8 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.yml +++ b/test/spec/unified-test-format/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: aggregate diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.json b/test/spec/unified-test-format/valid-pass/poc-sessions.json index 75f34894286..117c9e7d009 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.json +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.yml b/test/spec/unified-test-format/valid-pass/poc-sessions.yml index cb16657da3f..20902583286 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.yml @@ -124,12 +124,11 @@ tests: - description: "Dirty explicit session is discarded" # Original test specified retryWrites=true, but that is now the default. - # Retryable writes will require a sharded-replicaset, though. 
runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed659276..9ab44a9c548 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml index 4f981d15dd4..94fadda0aa5 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a5..de08edec442 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml index 47db7c3188a..33cd2a25214 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.json b/test/spec/unified-test-format/valid-pass/poc-transactions.json index 0355ca20605..2055a3b7057 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.yml b/test/spec/unified-test-format/valid-pass/poc-transactions.yml index 0a66b9bd7f6..8a12c8b39ac 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: @@ -51,7 +51,7 @@ tests: - description: "explicitly create 
collection using create command" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 @@ -109,7 +109,7 @@ tests: - description: "create index on a non-existing collection" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index bb4ba99a449..4d37fce9ac8 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -25,6 +25,7 @@ import { MongoBulkWriteError, MongoClientBulkWriteError, MongoError, + MongoOperationTimeoutError, MongoServerError, ObjectId, type OneOrMore, @@ -98,6 +99,19 @@ export function isMatchAsRootOperator(value: unknown): value is MatchAsRootOpera return typeof value === 'object' && value != null && '$$matchAsRoot' in value; } +export interface LteOperator { + $$lte: number; +} + +export function isLteOperator(value: unknown): value is LteOperator { + return ( + typeof value === 'object' && + value != null && + '$$lte' in value && + typeof value['$$lte'] === 'number' + ); +} + export const SpecialOperatorKeys = [ '$$exists', '$$type', @@ -106,7 +120,8 @@ export const SpecialOperatorKeys = [ '$$matchAsRoot', '$$matchAsDocument', '$$unsetOrMatches', - '$$sessionLsid' + '$$sessionLsid', + '$$lte' ]; export type SpecialOperator = @@ -117,7 +132,8 @@ export type SpecialOperator = | UnsetOrMatchesOperator | SessionLsidOperator | MatchAsDocumentOperator - | MatchAsRootOperator; + | MatchAsRootOperator + | LteOperator; type KeysOfUnion = T extends object ? 
keyof T : never; export type SpecialOperatorKey = KeysOfUnion; @@ -130,7 +146,8 @@ export function isSpecialOperator(value: unknown): value is SpecialOperator { isUnsetOrMatchesOperator(value) || isSessionLsidOperator(value) || isMatchAsRootOperator(value) || - isMatchAsDocumentOperator(value) + isMatchAsDocumentOperator(value) || + isLteOperator(value) ); } @@ -390,6 +407,9 @@ export function specialCheck( ); resultCheck(actual, expected.$$matchAsRoot as any, entities, path, false); + } else if (isLteOperator(expected)) { + expect(typeof actual).to.equal('number'); + expect(actual).to.be.lte(expected.$$lte); } else { expect.fail(`Unknown special operator: ${JSON.stringify(expected)}`); } @@ -759,6 +779,12 @@ export function expectErrorCheck( } } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/schema.ts b/test/tools/unified-spec-runner/schema.ts index 81b81724632..ce722b2e706 100644 --- a/test/tools/unified-spec-runner/schema.ts +++ b/test/tools/unified-spec-runner/schema.ts @@ -386,6 +386,7 @@ export interface StoreEventsAsEntity { } export interface ExpectedError { isError?: true; + isTimeoutError?: boolean; isClientError?: boolean; errorContains?: string; errorCode?: number; From a47e28061160bf26c8121c9a502839bd85546f06 Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 21 Jun 2024 12:06:30 -0400 Subject: [PATCH 093/136] refactor(NODE-6187): refactor to use TimeoutContext abstraction (#4131) --- src/bulk/common.ts | 4 + src/cmap/connection.ts | 4 +- src/cmap/connection_pool.ts | 39 +--- src/index.ts | 18 +- src/operations/aggregate.ts | 5 +- src/operations/bulk_write.ts | 11 +- src/operations/command.ts | 8 +- src/operations/count.ts | 9 +- src/operations/create_collection.ts | 18 +- src/operations/delete.ts | 21 +- src/operations/distinct.ts | 9 +- src/operations/drop.ts | 24 ++- src/operations/estimated_document_count.ts | 9 +- src/operations/execute_operation.ts | 16 +- src/operations/find.ts | 6 +- src/operations/find_and_modify.ts | 9 +- src/operations/get_more.ts | 5 +- src/operations/indexes.ts | 22 +- src/operations/insert.ts | 19 +- src/operations/kill_cursors.ts | 12 +- src/operations/list_collections.ts | 5 +- src/operations/list_databases.ts | 11 +- src/operations/operation.ts | 10 +- src/operations/profiling_level.ts | 9 +- src/operations/remove_user.ts | 9 +- src/operations/rename.ts | 9 +- src/operations/run_command.ts | 17 +- src/operations/search_indexes/create.ts | 12 +- src/operations/search_indexes/drop.ts | 9 +- src/operations/search_indexes/update.ts | 9 +- src/operations/set_profiling_level.ts | 6 +- src/operations/stats.ts | 9 +- src/operations/update.ts | 24 ++- src/operations/validate_collection.ts | 9 +- src/sdam/server.ts | 12 +- src/sdam/topology.ts | 55 +++-- src/timeout.ts | 166 +++++++++++++- ...lient_side_operations_timeout.unit.test.ts | 12 +- .../node_csot.test.ts | 2 +- test/tools/cmap_spec_runner.ts | 12 +- test/unit/cmap/connection_pool.test.js | 22 +- test/unit/error.test.ts | 19 +- test/unit/operations/get_more.test.ts | 2 +- test/unit/sdam/topology.test.ts | 76 +++++-- test/unit/timeout.test.ts | 204 +++++++++++++++++- 45 files changed, 796 insertions(+), 202 deletions(-) 
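As orientation for the hunks that follow: this commit threads a single TimeoutContext object from executeOperation down through Operation.execute, CommandOperation.executeCommand, Server.command and ConnectionPool.checkOut, replacing the earlier ad-hoc timeout/waitQueueTimeoutMS plumbing. Below is a minimal, self-contained TypeScript sketch of that shape. It only mirrors the names visible in the diff (TimeoutContext.create, csotEnabled, connectionCheckoutTimeout, clearConnectionCheckoutTimeout); the internals are simplified assumptions for illustration and are not the driver's actual src/timeout.ts implementation (for example, the real connectionCheckoutTimeout yields a Timeout object rather than a number).

// Standalone sketch only; names mirror the diff, internals are assumptions.

interface TimeoutContextOptions {
  timeoutMS?: number;               // per-operation CSOT budget, when configured
  serverSelectionTimeoutMS: number;
  waitQueueTimeoutMS: number;
}

abstract class TimeoutContext {
  static create(options: TimeoutContextOptions): TimeoutContext {
    // CSOT behavior applies only when timeoutMS is set; otherwise fall back
    // to the legacy per-phase timeouts.
    return options.timeoutMS != null
      ? new CSOTTimeoutContext(options)
      : new LegacyTimeoutContext(options);
  }

  abstract csotEnabled(): boolean;
  // Milliseconds the current connection checkout may block, or null for no limit.
  abstract get connectionCheckoutTimeout(): number | null;
  // Whether checkOut() should clear the timeout it was handed when it finishes.
  abstract get clearConnectionCheckoutTimeout(): boolean;
}

class CSOTTimeoutContext extends TimeoutContext {
  private readonly start = Date.now();
  constructor(private readonly options: TimeoutContextOptions) {
    super();
  }
  csotEnabled(): boolean {
    return true;
  }
  get connectionCheckoutTimeout(): number | null {
    // In this sketch, checkout gets whatever remains of the shared timeoutMS budget.
    const elapsed = Date.now() - this.start;
    return Math.max((this.options.timeoutMS ?? 0) - elapsed, 0);
  }
  get clearConnectionCheckoutTimeout(): boolean {
    // The budget is shared across phases in this sketch, so checkout does not clear it.
    return false;
  }
}

class LegacyTimeoutContext extends TimeoutContext {
  constructor(private readonly options: TimeoutContextOptions) {
    super();
  }
  csotEnabled(): boolean {
    return false;
  }
  get connectionCheckoutTimeout(): number | null {
    return this.options.waitQueueTimeoutMS > 0 ? this.options.waitQueueTimeoutMS : null;
  }
  get clearConnectionCheckoutTimeout(): boolean {
    // Each phase owns its own timer in this sketch, so checkout clears it when done.
    return true;
  }
}

// Usage, mirroring how executeOperation constructs the context in the diff below:
const ctx = TimeoutContext.create({
  timeoutMS: 500,
  serverSelectionTimeoutMS: 30_000,
  waitQueueTimeoutMS: 0
});
console.log(ctx.csotEnabled(), ctx.connectionCheckoutTimeout);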
diff --git a/src/bulk/common.ts b/src/bulk/common.ts index a62d62a4a5c..dc0bcfb513f 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -19,6 +19,7 @@ import { makeUpdateStatement, UpdateOperation, type UpdateStatement } from '../o import type { Server } from '../sdam/server'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { applyRetryableWrites, getTopology, @@ -842,6 +843,9 @@ export interface BulkWriteOptions extends CommandOperationOptions { forceServerObjectId?: boolean; /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */ let?: Document; + + /** @internal */ + timeoutContext?: TimeoutContext; } /** diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 507b95b0b98..f7bb1789b7c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -30,7 +30,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type TimeoutContext } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -97,7 +97,7 @@ export interface CommandOptions extends BSONSerializeOptions { directConnection?: boolean; /** @internal */ - timeout?: Timeout; + timeoutContext?: TimeoutContext; } /** @public */ diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 79440db1e06..5369cc155aa 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -27,8 +27,8 @@ import { } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; -import { Timeout, TimeoutError } from '../timeout'; -import { type Callback, csotMin, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type TimeoutContext, TimeoutError } from '../timeout'; +import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -355,41 +355,15 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. 
*/ - async checkOut(options?: { timeout?: Timeout }): Promise { + async checkOut(options: { timeoutContext: TimeoutContext }): Promise { this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); - const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - const serverSelectionTimeoutMS = this[kServer].topology.s.serverSelectionTimeoutMS; - const { promise, resolve, reject } = promiseWithResolvers(); - let timeout: Timeout | null = null; - if (options?.timeout) { - // CSOT enabled - // Determine if we're using the timeout passed in or a new timeout - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - // This check determines whether or not Topology.selectServer used the configured - // `timeoutMS` or `serverSelectionTimeoutMS` value for its timeout - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - // server selection used `timeoutMS`, so we should use the existing timeout as the timeout - // here - timeout = options.timeout; - } else { - // server selection used `serverSelectionTimeoutMS`, so we construct a new timeout with - // the time remaining to ensure that Topology.selectServer and ConnectionPool.checkOut - // cumulatively don't spend more than `serverSelectionTimeoutMS` blocking - timeout = Timeout.expires(serverSelectionTimeoutMS - options.timeout.timeElapsed); - } - } - } else { - timeout = Timeout.expires(waitQueueTimeoutMS); - } + const timeout = options.timeoutContext.connectionCheckoutTimeout; const waitQueueMember: WaitQueueMember = { resolve, @@ -404,6 +378,7 @@ export class ConnectionPool extends TypedEventEmitter { return await (timeout ? Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { + timeout?.clear(); waitQueueMember[kCancelled] = true; this.emitAndLog( @@ -416,7 +391,7 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); - if (options?.timeout) { + if (options.timeoutContext.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during connection checkout', { cause: timeoutError }); @@ -425,7 +400,7 @@ export class ConnectionPool extends TypedEventEmitter { } throw error; } finally { - if (timeout !== options?.timeout) timeout?.clear(); + if (options.timeoutContext.clearConnectionCheckoutTimeout) timeout?.clear(); } } diff --git a/src/index.ts b/src/index.ts index 13df3e8c437..693fcf03493 100644 --- a/src/index.ts +++ b/src/index.ts @@ -566,7 +566,13 @@ export type { RTTSampler, ServerMonitoringMode } from './sdam/monitor'; -export type { Server, ServerEvents, ServerOptions, ServerPrivate } from './sdam/server'; +export type { + Server, + ServerCommandOptions, + ServerEvents, + ServerOptions, + ServerPrivate +} from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, @@ -597,7 +603,15 @@ export type { WithTransactionCallback } from './sessions'; export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort'; -export type { Timeout } from './timeout'; +export type { + CSOTTimeoutContext, + CSOTTimeoutContextOptions, + LegacyTimeoutContext, + LegacyTimeoutContextOptions, + Timeout, + TimeoutContext, + TimeoutContextOptions +} from './timeout'; export type { Transaction, TransactionOptions, TxnState } from './transactions'; export type { BufferPool, diff --git a/src/operations/aggregate.ts 
b/src/operations/aggregate.ts index 7b67fd0422d..f1721ba41cd 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -4,6 +4,7 @@ import { MongoInvalidArgumentError } from '../error'; import { type ExplainOptions } from '../explain'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -105,7 +106,8 @@ export class AggregateOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options: AggregateOptions = this.options; const serverWireVersion = maxWireVersion(server); @@ -150,6 +152,7 @@ export class AggregateOperation extends CommandOperation { server, session, command, + timeoutContext, this.explain ? ExplainedCursorResponse : CursorResponse ); } diff --git a/src/operations/bulk_write.ts b/src/operations/bulk_write.ts index 0a855644f06..55b61ef73b0 100644 --- a/src/operations/bulk_write.ts +++ b/src/operations/bulk_write.ts @@ -7,6 +7,7 @@ import type { import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { AbstractOperation, Aspect, defineAspects } from './operation'; /** @internal */ @@ -32,11 +33,17 @@ export class BulkWriteOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const operations = this.operations; - const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; + const options = { + ...this.options, + ...this.bsonOptions, + readPreference: this.readPreference, + timeoutContext + }; // Create the bulk operation const bulk: BulkOperationBase = diff --git a/src/operations/command.ts b/src/operations/command.ts index c64b4ae963a..5bd80f796d1 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -7,6 +7,7 @@ import type { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { commandSupportsReadConcern, decorateWithExplain, @@ -112,19 +113,22 @@ export abstract class CommandOperation extends AbstractOperation { server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType: T | undefined ): Promise>; public async executeCommand( server: Server, session: ClientSession | undefined, - cmd: Document + cmd: Document, + timeoutContext: TimeoutContext ): Promise; async executeCommand( server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType?: MongoDBResponseConstructor ): Promise { this.server = server; @@ -132,7 +136,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, - timeout: this.timeout, + timeoutContext, readPreference: 
this.readPreference, session }; diff --git a/src/operations/count.ts b/src/operations/count.ts index 00aae501728..82330a11e76 100644 --- a/src/operations/count.ts +++ b/src/operations/count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -36,7 +37,11 @@ export class CountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const cmd: Document = { count: this.collectionName, @@ -59,7 +64,7 @@ export class CountOperation extends CommandOperation { cmd.maxTimeMS = options.maxTimeMS; } - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return result ? result.n : 0; } } diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index 8edc7e9a1c4..afb2680b9a0 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -9,6 +9,7 @@ import { MongoCompatibilityError } from '../error'; import type { PkFactory } from '../mongo_client'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { CreateIndexesOperation } from './indexes'; import { Aspect, defineAspects } from './operation'; @@ -124,7 +125,11 @@ export class CreateCollectionOperation extends CommandOperation { return 'create' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const name = this.name; const options = this.options; @@ -155,7 +160,7 @@ export class CreateCollectionOperation extends CommandOperation { unique: true } }); - await createOp.executeWithoutEncryptedFieldsCheck(server, session); + await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } if (!options.encryptedFields) { @@ -163,7 +168,7 @@ export class CreateCollectionOperation extends CommandOperation { } } - const coll = await this.executeWithoutEncryptedFieldsCheck(server, session); + const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); if (encryptedFields) { // Create the required index for queryable encryption support. 
@@ -173,7 +178,7 @@ export class CreateCollectionOperation extends CommandOperation { { __safeContent__: 1 }, {} ); - await createIndexOp.execute(server, session); + await createIndexOp.execute(server, session, timeoutContext); } return coll; @@ -181,7 +186,8 @@ export class CreateCollectionOperation extends CommandOperation { private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const db = this.db; const name = this.name; @@ -198,7 +204,7 @@ export class CreateCollectionOperation extends CommandOperation { } } // otherwise just execute the command - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); return new Collection(db, name, options); } } diff --git a/src/operations/delete.ts b/src/operations/delete.ts index f0ef61cb7b1..0e93ead36a2 100644 --- a/src/operations/delete.ts +++ b/src/operations/delete.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoServerError } from '../error'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace } from '../utils'; import { type WriteConcernOptions } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -67,7 +68,8 @@ export class DeleteOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; @@ -95,7 +97,12 @@ export class DeleteOperation extends CommandOperation { } } - const res: TODO_NODE_3286 = await super.executeCommand(server, session, command); + const res: TODO_NODE_3286 = await super.executeCommand( + server, + session, + command, + timeoutContext + ); return res; } } @@ -107,9 +114,10 @@ export class DeleteOneOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -127,9 +135,10 @@ export class DeleteManyOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/distinct.ts b/src/operations/distinct.ts index 4fda285d880..51f2a362d8c 100644 --- a/src/operations/distinct.ts +++ b/src/operations/distinct.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, decorateWithReadConcern } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -42,7 +43,11 @@ export class DistinctOperation extends CommandOperation { return 'distinct' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const key = this.key; const query = this.query; @@ -72,7 +77,7 @@ export class DistinctOperation extends CommandOperation { // Have we specified collation decorateWithCollation(cmd, coll, options); - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return this.explain ? 
result : result.values; } diff --git a/src/operations/drop.ts b/src/operations/drop.ts index 15624d4c07b..787bb6e7d0f 100644 --- a/src/operations/drop.ts +++ b/src/operations/drop.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { MONGODB_ERROR_CODES, MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class DropCollectionOperation extends CommandOperation { return 'drop' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const options = this.options; const name = this.name; @@ -57,7 +62,7 @@ export class DropCollectionOperation extends CommandOperation { // Drop auxilliary collections, ignoring potential NamespaceNotFound errors. const dropOp = new DropCollectionOperation(db, collectionName); try { - await dropOp.executeWithoutEncryptedFieldsCheck(server, session); + await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } catch (err) { if ( !(err instanceof MongoServerError) || @@ -69,14 +74,15 @@ export class DropCollectionOperation extends CommandOperation { } } - return await this.executeWithoutEncryptedFieldsCheck(server, session); + return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - await super.executeCommand(server, session, { drop: this.name }); + await super.executeCommand(server, session, { drop: this.name }, timeoutContext); return true; } } @@ -96,8 +102,12 @@ export class DropDatabaseOperation extends CommandOperation { return 'dropDatabase' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropDatabase: 1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext); return true; } } diff --git a/src/operations/estimated_document_count.ts b/src/operations/estimated_document_count.ts index c1d6c381998..5ab5aa4c305 100644 --- a/src/operations/estimated_document_count.ts +++ b/src/operations/estimated_document_count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -30,7 +31,11 @@ export class EstimatedDocumentCountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd: Document = { count: this.collectionName }; if (typeof 
this.options.maxTimeMS === 'number') { @@ -43,7 +48,7 @@ export class EstimatedDocumentCountOperation extends CommandOperation { cmd.comment = this.options.comment; } - const response = await super.executeCommand(server, session, cmd); + const response = await super.executeCommand(server, session, cmd, timeoutContext); return response?.n || 0; } diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index ec7c233eeca..0cffa0c35f7 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -24,7 +24,8 @@ import { } from '../sdam/server_selection'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; -import { supportsRetryableWrites } from '../utils'; +import { TimeoutContext } from '../timeout'; +import { squashError, supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -57,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -86,6 +87,12 @@ export async function executeOperation< ); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -109,7 +116,8 @@ export async function executeOperation< return await tryOperation(operation, { topology, session, - readPreference + readPreference, + timeoutContext }); } finally { if (session?.owner != null && session.owner === owner) { @@ -268,7 +276,7 @@ async function tryOperation< if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) { operation.resetBatch(); } - return await operation.execute(server, session); + return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; diff --git a/src/operations/find.ts b/src/operations/find.ts index d34d99cc745..a2ea2ad25e8 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -6,6 +6,7 @@ import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithExplain, type MongoDBNamespace, normalizeHintField } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -105,7 +106,8 @@ export class FindOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { this.server = server; @@ -124,7 +126,7 @@ export class FindOperation extends CommandOperation { ...this.bsonOptions, documentsReturnedIn: 'firstBatch', session, - timeout: this.timeout + timeoutContext }, this.explain ? ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/find_and_modify.ts b/src/operations/find_and_modify.ts index 92b17e93b3b..651bcccb626 100644 --- a/src/operations/find_and_modify.ts +++ b/src/operations/find_and_modify.ts @@ -5,6 +5,7 @@ import { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortForCmd } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils'; import { type WriteConcern, type WriteConcernSettings } from '../write_concern'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -180,7 +181,11 @@ export class FindAndModifyOperation extends CommandOperation { return 'findAndModify' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const query = this.query; const options = { ...this.options, ...this.bsonOptions }; @@ -208,7 +213,7 @@ export class FindAndModifyOperation extends CommandOperation { } // Execute the command - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return options.includeResultMetadata ? result : (result.value ?? 
null); } } diff --git a/src/operations/get_more.ts b/src/operations/get_more.ts index aa550721b6f..34317d533b5 100644 --- a/src/operations/get_more.ts +++ b/src/operations/get_more.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -58,7 +59,8 @@ export class GetMoreOperation extends AbstractOperation { */ override async execute( server: Server, - _session: ClientSession | undefined + _session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Getmore must run on the same server operation began on'); @@ -97,6 +99,7 @@ export class GetMoreOperation extends AbstractOperation { const commandOptions = { returnFieldSelector: null, documentsReturnedIn: 'nextBatch', + timeoutContext, ...this.options }; diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index fda3fa80dd6..c96a5d73453 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -6,6 +6,7 @@ import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils'; import { type CollationOptions, @@ -296,7 +297,11 @@ export class CreateIndexesOperation extends CommandOperation { return 'createIndexes'; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const indexes = this.indexes; @@ -316,7 +321,7 @@ export class CreateIndexesOperation extends CommandOperation { // collation is set on each index, it should not be defined at the root this.options.collation = undefined; - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); const indexNames = indexes.map(index => index.name || ''); return indexNames; @@ -344,9 +349,13 @@ export class DropIndexOperation extends CommandOperation { return 'dropIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName }; - return await super.executeCommand(server, session, cmd); + return await super.executeCommand(server, session, cmd, timeoutContext); } } @@ -379,7 +388,8 @@ export class ListIndexesOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const serverWireVersion = maxWireVersion(server); @@ -393,7 +403,7 @@ export class ListIndexesOperation extends CommandOperation { command.comment = this.options.comment; } - return await super.executeCommand(server, session, command, CursorResponse); + return 
await super.executeCommand(server, session, command, timeoutContext, CursorResponse); } } diff --git a/src/operations/insert.ts b/src/operations/insert.ts index 35a050ed1ca..1a40763e313 100644 --- a/src/operations/insert.ts +++ b/src/operations/insert.ts @@ -5,6 +5,7 @@ import { MongoInvalidArgumentError, MongoServerError } from '../error'; import type { InferIdType } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { BulkWriteOperation } from './bulk_write'; @@ -27,7 +28,11 @@ export class InsertOperation extends CommandOperation { return 'insert' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? options.ordered : true; const command: Document = { @@ -46,7 +51,7 @@ export class InsertOperation extends CommandOperation { command.comment = options.comment; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } @@ -73,9 +78,10 @@ export class InsertOneOperation extends InsertOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res = await super.execute(server, session); + const res = await super.execute(server, session, timeoutContext); if (res.code) throw new MongoServerError(res); if (res.writeErrors) { // This should be a WriteError but we can't change it now because of error hierarchy @@ -123,7 +129,8 @@ export class InsertManyOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; @@ -137,7 +144,7 @@ export class InsertManyOperation extends AbstractOperation { ); try { - const res = await bulkWriteOperation.execute(server, session); + const res = await bulkWriteOperation.execute(server, session, timeoutContext); return { acknowledged: writeConcern?.w !== 0, insertedCount: res.insertedCount, diff --git a/src/operations/kill_cursors.ts b/src/operations/kill_cursors.ts index 356230e9c7a..72c6a04b276 100644 --- a/src/operations/kill_cursors.ts +++ b/src/operations/kill_cursors.ts @@ -2,6 +2,7 @@ import type { Long } from '../bson'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -29,7 +30,11 @@ export class KillCursorsOperation extends AbstractOperation { return 'killCursors' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise 
{ if (server !== this.server) { throw new MongoRuntimeError('Killcursor must run on the same server operation began on'); } @@ -46,7 +51,10 @@ export class KillCursorsOperation extends AbstractOperation { cursors: [this.cursorId] }; try { - await server.command(this.ns, killCursorsCommand, { session }); + await server.command(this.ns, killCursorsCommand, { + session, + timeoutContext + }); } catch (error) { // The driver should never emit errors from killCursors, this is spec-ed behavior squashError(error); diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index e94300f1205..702db0fe3f2 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -54,12 +55,14 @@ export class ListCollectionsOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { return await super.executeCommand( server, session, this.generateCommand(maxWireVersion(server)), + timeoutContext, CursorResponse ); } diff --git a/src/operations/list_databases.ts b/src/operations/list_databases.ts index 5ad9142a1a7..bd740d50c68 100644 --- a/src/operations/list_databases.ts +++ b/src/operations/list_databases.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -41,7 +42,8 @@ export class ListDatabasesOperation extends CommandOperation { const cmd: Document = { listDatabases: 1 }; @@ -63,7 +65,12 @@ export class ListDatabasesOperation extends CommandOperation); + return await (super.executeCommand( + server, + session, + cmd, + timeoutContext + ) as Promise); } } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index e08d25bfec0..8558af7a4e5 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,7 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; -import { type Timeout } from '../timeout'; +import { type Timeout, type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -80,15 +80,17 @@ export abstract class AbstractOperation { this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; this.trySecondaryWrite = false; - - this.timeoutMS = options.timeoutMS; } /** Must match the first key of the command object sent to the server. 
Command name should be stateless (should not use 'this' keyword) */ abstract get commandName(): string; - abstract execute(server: Server, session: ClientSession | undefined): Promise; + abstract execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise; hasAspect(aspect: symbol): boolean { const ctor = this.constructor as { aspects?: Set }; diff --git a/src/operations/profiling_level.ts b/src/operations/profiling_level.ts index 383062c2a40..7c860a244b7 100644 --- a/src/operations/profiling_level.ts +++ b/src/operations/profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -20,8 +21,12 @@ export class ProfilingLevelOperation extends CommandOperation { return 'profile' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - const doc = await super.executeCommand(server, session, { profile: -1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + const doc = await super.executeCommand(server, session, { profile: -1 }, timeoutContext); if (doc.ok === 1) { const was = doc.was; if (was === 0) return 'off'; diff --git a/src/operations/remove_user.ts b/src/operations/remove_user.ts index ced8e4e1cab..7f484ba89a3 100644 --- a/src/operations/remove_user.ts +++ b/src/operations/remove_user.ts @@ -1,6 +1,7 @@ import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -22,8 +23,12 @@ export class RemoveUserOperation extends CommandOperation { return 'dropUser' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropUser: this.username }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropUser: this.username }, timeoutContext); return true; } } diff --git a/src/operations/rename.ts b/src/operations/rename.ts index a27d4afe45a..883be282b64 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class RenameOperation extends CommandOperation { return 'renameCollection' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { // Build the command const renameCollection = this.collection.namespace; const toCollection = 
this.collection.s.namespace.withCollection(this.newName).toString(); @@ -42,7 +47,7 @@ export class RenameOperation extends CommandOperation { dropTarget: dropTarget }; - await super.executeCommand(server, session, command); + await super.executeCommand(server, session, command, timeoutContext); return new Collection(this.collection.s.db, this.newName, this.collection.s.options); } } diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index 56462fa8843..b91e2d0344e 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -5,6 +5,7 @@ import { type TODO_NODE_3286 } from '../mongo_types'; import type { ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { AbstractOperation } from './operation'; @@ -33,7 +34,11 @@ export class RunCommandOperation extends AbstractOperation { return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command( this.ns, @@ -42,7 +47,7 @@ export class RunCommandOperation extends AbstractOperation { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }, this.options.responseType ); @@ -67,13 +72,17 @@ export class RunAdminCommandOperation extends AbstractOperation return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, session, - timeout: this.timeout + timeoutContext }); return res; } diff --git a/src/operations/search_indexes/create.ts b/src/operations/search_indexes/create.ts index 7e5e55d18d6..9661026e3eb 100644 --- a/src/operations/search_indexes/create.ts +++ b/src/operations/search_indexes/create.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @@ -32,14 +33,21 @@ export class CreateSearchIndexesOperation extends AbstractOperation { return 'createSearchIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { createSearchIndexes: namespace.collection, indexes: this.descriptions }; - const res = await server.command(namespace, command, { session }); + const res = await server.command(namespace, command, { + session, + timeoutContext + }); const indexesCreated: Array<{ name: string }> = res?.indexesCreated ?? 
[]; return indexesCreated.map(({ name }) => name); diff --git a/src/operations/search_indexes/drop.ts b/src/operations/search_indexes/drop.ts index 4e287cca012..e9ea0ad01ce 100644 --- a/src/operations/search_indexes/drop.ts +++ b/src/operations/search_indexes/drop.ts @@ -4,6 +4,7 @@ import type { Collection } from '../../collection'; import { MONGODB_ERROR_CODES, MongoServerError } from '../../error'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class DropSearchIndexOperation extends AbstractOperation { return 'dropSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command: Document = { @@ -31,7 +36,7 @@ export class DropSearchIndexOperation extends AbstractOperation { } try { - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); } catch (error) { const isNamespaceNotFoundError = error instanceof MongoServerError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound; diff --git a/src/operations/search_indexes/update.ts b/src/operations/search_indexes/update.ts index aad7f93536c..e88e777d675 100644 --- a/src/operations/search_indexes/update.ts +++ b/src/operations/search_indexes/update.ts @@ -3,6 +3,7 @@ import type { Document } from 'bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -19,7 +20,11 @@ export class UpdateSearchIndexOperation extends AbstractOperation { return 'updateSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { updateSearchIndex: namespace.collection, @@ -27,7 +32,7 @@ export class UpdateSearchIndexOperation extends AbstractOperation { definition: this.definition }; - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); return; } } diff --git a/src/operations/set_profiling_level.ts b/src/operations/set_profiling_level.ts index 9969b2ea3c2..d76473f2632 100644 --- a/src/operations/set_profiling_level.ts +++ b/src/operations/set_profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { enumToString } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -53,7 +54,8 @@ export class SetProfilingLevelOperation extends CommandOperation override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const level = this.level; @@ -64,7 +66,7 @@ export 
class SetProfilingLevelOperation extends CommandOperation } // TODO(NODE-3483): Determine error to put here - await super.executeCommand(server, session, { profile: this.profile }); + await super.executeCommand(server, session, { profile: this.profile }, timeoutContext); return level; } } diff --git a/src/operations/stats.ts b/src/operations/stats.ts index 41c9faf6e24..aafd3bf1bac 100644 --- a/src/operations/stats.ts +++ b/src/operations/stats.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -24,13 +25,17 @@ export class DbStatsOperation extends CommandOperation { return 'dbStats' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const command: Document = { dbStats: true }; if (this.options.scale != null) { command.scale = this.options.scale; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } diff --git a/src/operations/update.ts b/src/operations/update.ts index ba0ad6d95ff..5b6f396afec 100644 --- a/src/operations/update.ts +++ b/src/operations/update.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoInvalidArgumentError, MongoServerError } import type { InferIdType, TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { hasAtomicOperators, type MongoDBNamespace } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -91,7 +92,11 @@ export class UpdateOperation extends CommandOperation { return this.statements.every(op => op.multi == null || op.multi === false); } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const command: Document = { @@ -122,7 +127,7 @@ export class UpdateOperation extends CommandOperation { } } - const res = await super.executeCommand(server, session, command); + const res = await super.executeCommand(server, session, command, timeoutContext); return res; } } @@ -143,9 +148,10 @@ export class UpdateOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -177,9 +183,10 @@ export class UpdateManyOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -230,9 +237,10 @@ export class ReplaceOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/validate_collection.ts b/src/operations/validate_collection.ts index 4880a703a7a..16ae4cad9e0 100644 --- a/src/operations/validate_collection.ts +++ b/src/operations/validate_collection.ts @@ -3,6 +3,7 @@ import type { Document } from '../bson'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -37,10 +38,14 @@ export class ValidateCollectionOperation extends CommandOperation { return 'validate' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const collectionName = this.collectionName; - const doc = await super.executeCommand(server, session, this.command); + const doc = await super.executeCommand(server, session, this.command, timeoutContext); if (doc.result != null && typeof doc.result !== 'string') throw new MongoUnexpectedServerResponseError('Error with validation data'); if (doc.result != null && doc.result.match(/exception|corrupt/) != null) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 3d2a3ca1a31..08325086d53 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -40,6 +40,7 @@ import type { ServerApi } from '../mongo_client'; import { TypedEventEmitter } from '../mongo_types'; import type { GetMoreOptions 
} from '../operations/get_more'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isTransactionCommand } from '../transactions'; import { type EventEmitterWithState, @@ -104,6 +105,11 @@ export type ServerEvents = { } & ConnectionPoolEvents & EventEmitterWithState; +/** @internal */ +export type ServerCommandOptions = Omit & { + timeoutContext: TimeoutContext; +}; + /** @internal */ export class Server extends TypedEventEmitter { /** @internal */ @@ -267,20 +273,20 @@ export class Server extends TypedEventEmitter { public async command( ns: MongoDBNamespace, command: Document, - options: CommandOptions | undefined, + options: ServerCommandOptions, responseType: T | undefined ): Promise>; public async command( ns: MongoDBNamespace, command: Document, - options?: CommandOptions + options: ServerCommandOptions ): Promise; public async command( ns: MongoDBNamespace, cmd: Document, - options: CommandOptions, + options: ServerCommandOptions, responseType?: MongoDBResponseConstructor ): Promise { if (ns.db == null || typeof ns === 'string') { diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 4c9d71d807d..6117b5317cd 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -34,11 +34,10 @@ import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mong import { TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; -import { Timeout, TimeoutError } from '../timeout'; +import { Timeout, TimeoutContext, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, - csotMin, type EventEmitterWithState, HostAddress, List, @@ -179,8 +178,11 @@ export interface SelectServerOptions { session?: ClientSession; operationName: string; previousServer?: ServerDescription; - /** @internal*/ - timeout?: Timeout; + /** + * @internal + * TODO(NODE-5685): Make this required + * */ + timeoutContext?: TimeoutContext; } /** @public */ @@ -458,13 +460,20 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.options.timeoutMS; - const timeout = timeoutMS != null ? Timeout.expires(timeoutMS) : undefined; + const timeoutMS = this.client.s.options.timeoutMS; + const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? ReadPreference.primary; + + const timeoutContext = TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS, + waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS + }); + const selectServerOptions = { operationName: 'ping', - timeout, - ...options + ...options, + timeoutContext }; try { const server = await this.selectServer( @@ -474,7 +483,7 @@ export class Topology extends TypedEventEmitter { const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, { timeout }); + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -563,24 +572,10 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } - const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS ?? 
0; - let timeout: Timeout | null; - if (options.timeout) { - // CSOT Enabled - if (options.timeout.duration > 0 || serverSelectionTimeoutMS > 0) { - if ( - options.timeout.duration === serverSelectionTimeoutMS || - csotMin(options.timeout.duration, serverSelectionTimeoutMS) < serverSelectionTimeoutMS - ) { - timeout = options.timeout; - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); - } - } else { - timeout = null; - } - } else { - timeout = Timeout.expires(serverSelectionTimeoutMS); + let timeout; + if (options.timeoutContext) timeout = options.timeoutContext.serverSelectionTimeout; + else { + timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); } const isSharded = this.description.type === TopologyType.Sharded; @@ -604,7 +599,7 @@ export class Topology extends TypedEventEmitter { ) ); } - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); return transaction.server; } @@ -654,7 +649,7 @@ export class Topology extends TypedEventEmitter { ); } - if (options.timeout) { + if (options.timeoutContext?.csotEnabled()) { throw new MongoOperationTimeoutError('Timed out during server selection', { cause: timeoutError }); @@ -664,7 +659,7 @@ export class Topology extends TypedEventEmitter { // Other server selection error throw error; } finally { - if (timeout !== options.timeout) timeout?.clear(); + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); } } /** diff --git a/src/timeout.ts b/src/timeout.ts index 7af1a23f261..3d65992a02b 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,7 +1,7 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError } from './error'; -import { noop } from './utils'; +import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { @@ -107,3 +107,165 @@ export class Timeout extends Promise { ); } } + +/** @internal */ +export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; + +/** @internal */ +export type LegacyTimeoutContextOptions = { + serverSelectionTimeoutMS: number; + waitQueueTimeoutMS: number; + socketTimeoutMS?: number; +}; + +/** @internal */ +export type CSOTTimeoutContextOptions = { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; +}; + +function isLegacyTimeoutContextOptions(v: unknown): v is LegacyTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'waitQueueTimeoutMS' in v && + typeof v.waitQueueTimeoutMS === 'number' + ); +} + +function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'timeoutMS' in v && + typeof v.timeoutMS === 'number' + ); +} + +/** @internal */ +export abstract class TimeoutContext { + static create(options: TimeoutContextOptions): TimeoutContext { + if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); + else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); + else throw new MongoRuntimeError('Unrecognized options'); + } + + abstract get serverSelectionTimeout(): Timeout | null; + + abstract get connectionCheckoutTimeout(): Timeout | null; + + 
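  // A minimal usage sketch of the TimeoutContext.create factory above (illustrative only; it mirrors
  // the TimeoutContext unit tests added later in this series): supplying a numeric timeoutMS selects
  // the CSOT implementation, while omitting it falls back to the legacy per-phase timeouts.
  //
  //   const csotCtx = TimeoutContext.create({
  //     timeoutMS: 1000,
  //     serverSelectionTimeoutMS: 30000,
  //     waitQueueTimeoutMS: 0
  //   });
  //   csotCtx.csotEnabled(); // true
  //
  //   const legacyCtx = TimeoutContext.create({
  //     serverSelectionTimeoutMS: 30000,
  //     waitQueueTimeoutMS: 0
  //   });
  //   legacyCtx.csotEnabled(); // false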
abstract get clearServerSelectionTimeout(): boolean; + + abstract get clearConnectionCheckoutTimeout(): boolean; + + abstract csotEnabled(): this is CSOTTimeoutContext; +} + +/** @internal */ +export class CSOTTimeoutContext extends TimeoutContext { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; + + clearConnectionCheckoutTimeout: boolean; + clearServerSelectionTimeout: boolean; + + private _maxTimeMS?: number; + + private _serverSelectionTimeout?: Timeout | null; + private _connectionCheckoutTimeout?: Timeout | null; + + constructor(options: CSOTTimeoutContextOptions) { + super(); + this.timeoutMS = options.timeoutMS; + + this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; + + this.socketTimeoutMS = options.socketTimeoutMS; + + this.clearServerSelectionTimeout = false; + this.clearConnectionCheckoutTimeout = true; + } + + get maxTimeMS(): number { + return this._maxTimeMS ?? -1; + } + + set maxTimeMS(v: number) { + this._maxTimeMS = v; + } + + csotEnabled(): this is CSOTTimeoutContext { + return true; + } + + get serverSelectionTimeout(): Timeout | null { + // check for undefined + if (typeof this._serverSelectionTimeout !== 'object') { + const usingServerSelectionTimeoutMS = + this.serverSelectionTimeoutMS !== 0 && + csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; + + if (usingServerSelectionTimeoutMS) { + this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + } else { + if (this.timeoutMS > 0) { + this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + } else { + this._serverSelectionTimeout = null; + } + } + } + + return this._serverSelectionTimeout; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (typeof this._connectionCheckoutTimeout !== 'object') { + if (typeof this._serverSelectionTimeout === 'object') { + // null or Timeout + this._connectionCheckoutTimeout = this._serverSelectionTimeout; + } else { + throw new MongoRuntimeError( + 'Unreachable. 
If you are seeing this error, please file a ticket on the NODE driver project on Jira' + ); + } + } + return this._connectionCheckoutTimeout; + } +} + +/** @internal */ +export class LegacyTimeoutContext extends TimeoutContext { + options: LegacyTimeoutContextOptions; + clearServerSelectionTimeout: boolean; + clearConnectionCheckoutTimeout: boolean; + + constructor(options: LegacyTimeoutContextOptions) { + super(); + this.options = options; + this.clearServerSelectionTimeout = true; + this.clearConnectionCheckoutTimeout = true; + } + + csotEnabled(): this is CSOTTimeoutContext { + return false; + } + + get serverSelectionTimeout(): Timeout | null { + if (this.options.serverSelectionTimeoutMS != null && this.options.serverSelectionTimeoutMS > 0) + return Timeout.expires(this.options.serverSelectionTimeoutMS); + return null; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (this.options.waitQueueTimeoutMS != null && this.options.waitQueueTimeoutMS > 0) + return Timeout.expires(this.options.waitQueueTimeoutMS); + return null; + } +} diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index c1426d8db1d..c4989f58d7f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -33,16 +33,20 @@ describe('CSOT spec unit tests', function () { client = this.configuration.newClient({ timeoutMS: 1000 }); // Spy on connection checkout and pull options argument const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); - const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); const expiresSpy = sinon.spy(Timeout, 'expires'); await client.db('db').collection('collection').insertOne({ x: 1 }); expect(checkoutSpy).to.have.been.calledOnce; - expect(checkoutSpy.firstCall.args[0].timeout).to.exist; + const timeoutContext = checkoutSpy.lastCall.args[0].timeoutContext; + expect(timeoutContext).to.exist; // Check that we passed through the timeout - expect(checkoutSpy.firstCall.args[0].timeout).to.equal( - selectServerSpy.lastCall.lastArg.timeout + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.be.instanceOf(Timeout); + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.equal( + // @ts-expect-error accessing private properties + timeoutContext._connectionCheckoutTimeout ); // Check that no more Timeouts are constructed after we enter checkout diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 5636eb00db7..17d85ba5b23 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -143,7 +143,7 @@ describe('CSOT driver tests', () => { }); it('throws a MongoOperationTimeoutError', { - metadata: { requires: { mongodb: '>=4.4' } }, + metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index f6d7e68bedc..9bb2abdb87a 100644 --- a/test/tools/cmap_spec_runner.ts +++ 
b/test/tools/cmap_spec_runner.ts @@ -12,7 +12,8 @@ import { makeClientMetadata, type MongoClient, type Server, - shuffle + shuffle, + TimeoutContext } from '../mongodb'; import { isAnyRequirementSatisfied } from './unified-spec-runner/unified-utils'; import { type FailPoint, sleep } from './utils'; @@ -185,7 +186,14 @@ const compareInputToSpec = (input, expected, message) => { const getTestOpDefinitions = (threadContext: ThreadContext) => ({ checkOut: async function (op) { - const connection: Connection = await ConnectionPool.prototype.checkOut.call(threadContext.pool); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: threadContext.pool.options.waitQueueTimeoutMS + }); + const connection: Connection = await ConnectionPool.prototype.checkOut.call( + threadContext.pool, + { timeoutContext } + ); if (op.label != null) { threadContext.connections.set(op.label, connection); } else { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 18048befab4..1604cd82d86 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -10,8 +10,10 @@ const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); const { MongoClientAuthProviders } = require('../../mongodb'); +const { TimeoutContext } = require('../../mongodb'); describe('Connection Pool', function () { + let timeoutContext; let mockMongod; const stubServer = { topology: { @@ -44,6 +46,10 @@ describe('Connection Pool', function () { }) ); + beforeEach(() => { + timeoutContext = TimeoutContext.create({ waitQueueTimeoutMS: 0, serverSelectionTimeoutMS: 0 }); + }); + it('should destroy connections which have been closed', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; @@ -64,8 +70,10 @@ describe('Connection Pool', function () { const events = []; pool.on('connectionClosed', event => events.push(event)); - const conn = await pool.checkOut(); - const error = await conn.command(ns('admin.$cmd'), { ping: 1 }, {}).catch(error => error); + const conn = await pool.checkOut({ timeoutContext }); + const error = await conn + .command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }) + .catch(error => error); expect(error).to.be.instanceOf(Error); pool.checkIn(conn); @@ -93,7 +101,7 @@ describe('Connection Pool', function () { pool.ready(); - const conn = await pool.checkOut(); + const conn = await pool.checkOut({ timeoutContext }); const maybeError = await conn.command(ns('admin.$cmd'), { ping: 1 }, undefined).catch(e => e); expect(maybeError).to.be.instanceOf(MongoError); expect(maybeError).to.match(/timed out/); @@ -114,11 +122,15 @@ describe('Connection Pool', function () { waitQueueTimeoutMS: 200, hostAddress: mockMongod.hostAddress() }); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 200, + serverSelectionTimeoutMS: 0 + }); pool.ready(); - const conn = await pool.checkOut(); - const err = await pool.checkOut().catch(e => e); + const conn = await pool.checkOut({ timeoutContext }); + const err = await pool.checkOut({ timeoutContext }).catch(e => e); expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); sinon.stub(pool, 'availableConnectionCount').get(() => 0); pool.checkIn(conn); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 6bab40d0318..bdc049cbc4f 100644 --- a/test/unit/error.test.ts +++ 
b/test/unit/error.test.ts @@ -28,6 +28,7 @@ import { ns, PoolClosedError as MongoPoolClosedError, setDifference, + TimeoutContext, type TopologyDescription, type TopologyOptions, WaitQueueTimeoutError as MongoWaitQueueTimeoutError @@ -376,11 +377,17 @@ describe('MongoErrors', () => { { replicaSet: 'rs' } as TopologyOptions ); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); return replSet .connect() - .then(topology => topology.selectServer('primary', {})) + .then(topology => topology.selectServer('primary', { timeoutContext })) .then(server => - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { + timeoutContext + }) ) .then( () => expect.fail('expected command to fail'), @@ -419,10 +426,14 @@ describe('MongoErrors', () => { if (err) { return cleanup(err); } + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - topology.selectServer('primary', {}).then(server => { + topology.selectServer('primary', { timeoutContext }).then(server => { server - .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { timeoutContext }) .then(expect.fail, err => { let _err; try { diff --git a/test/unit/operations/get_more.test.ts b/test/unit/operations/get_more.test.ts index f79da44e22f..17bc20f6fa7 100644 --- a/test/unit/operations/get_more.test.ts +++ b/test/unit/operations/get_more.test.ts @@ -69,7 +69,7 @@ describe('GetMoreOperation', function () { const call = stub.getCall(0); expect(call.args[0]).to.equal(namespace); expect(call.args[1]).to.deep.equal(expectedGetMoreCommand); - expect(call.args[2]).to.deep.equal(opts); + expect(call.args[2]).to.containSubset(opts); }); }); diff --git a/test/unit/sdam/topology.test.ts b/test/unit/sdam/topology.test.ts index e4a34417d50..5264b5d9c45 100644 --- a/test/unit/sdam/topology.test.ts +++ b/test/unit/sdam/topology.test.ts @@ -17,6 +17,7 @@ import { Server, SrvPoller, SrvPollingEvent, + TimeoutContext, Topology, TopologyDescription, TopologyDescriptionChangedEvent, @@ -108,17 +109,28 @@ describe('Topology (unit)', function () { const topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); topology.connect().then(() => { - topology.selectServer('primary', {}).then(server => { - server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }).then( - () => expect.fail('expected command to fail'), - err => { - expect(err).to.exist; - expect(err).to.match(/timed out/); - topology.close(); - done(); - } - ); - }, expect.fail); + const ctx = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0, + socketTimeoutMS: 250 + }); + topology + .selectServer('primary', { + timeoutContext: ctx + }) + .then(server => { + server + .command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250, timeoutContext: ctx }) + .then( + () => expect.fail('expected command to fail'), + err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + topology.close(); + done(); + } + ); + }, expect.fail); }, expect.fail); }); }); @@ -217,10 +229,16 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const timeoutContext = 
TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.true; }); @@ -245,11 +263,17 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.false; topology.close(); @@ -269,14 +293,20 @@ describe('Topology (unit)', function () { topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); await topology.connect(); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); const server = await topology.selectServer('primary', {}); let serverDescription; server.on('descriptionReceived', sd => (serverDescription = sd)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(server.description.type).to.equal('Unknown'); }); diff --git a/test/unit/timeout.test.ts b/test/unit/timeout.test.ts index 3fafc21b35f..119d0516a9c 100644 --- a/test/unit/timeout.test.ts +++ b/test/unit/timeout.test.ts @@ -1,6 +1,14 @@ import { expect } from 'chai'; -import { MongoInvalidArgumentError, Timeout, TimeoutError } from '../mongodb'; +import { + CSOTTimeoutContext, + LegacyTimeoutContext, + MongoInvalidArgumentError, + MongoRuntimeError, + Timeout, + TimeoutContext, + TimeoutError +} from '../mongodb'; describe('Timeout', function () { let timeout: Timeout; @@ -115,3 +123,197 @@ describe('Timeout', function () { }); }); }); + +describe('TimeoutContext', function () { + describe('TimeoutContext.create', function () { + context('when timeoutMS is a number', function () { + it('returns a CSOTTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(CSOTTimeoutContext); + }); + }); + + context('when timeoutMS is undefined', function () { + it('returns a LegacyTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(LegacyTimeoutContext); + }); + }); + }); + + describe('CSOTTimeoutContext', function () { + let ctx: CSOTTimeoutContext; + + describe('get serverSelectionTimeout()', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is 0', function () { + it('returns null', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); + + expect(ctx.serverSelectionTimeout).to.be.null; + }); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is >0', function () { + it('returns a Timeout instance 
with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + }); + + context( + 'when timeoutMS is >0 serverSelectionTimeoutMS is >0 and timeoutMS > serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 15, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + } + ); + + context( + 'when timeoutMS is >0, serverSelectionTimeoutMS is >0 and timeoutMS < serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to timeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 10, + serverSelectionTimeoutMS: 15 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.timeoutMS); + }); + } + ); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when called before get serverSelectionTimeout()', function () { + it('throws a MongoRuntimeError', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 15 + }); + + expect(() => ctx.connectionCheckoutTimeout).to.throw(MongoRuntimeError); + }); + }); + + context('when called after get serverSelectionTimeout()', function () { + let serverSelectionTimeout: Timeout; + let connectionCheckoutTimeout: Timeout; + + afterEach(() => { + serverSelectionTimeout.clear(); + connectionCheckoutTimeout.clear(); + }); + + it('returns same timeout as serverSelectionTimeout', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 86 + }); + serverSelectionTimeout = ctx.serverSelectionTimeout; + connectionCheckoutTimeout = ctx.connectionCheckoutTimeout; + + expect(connectionCheckoutTimeout).to.exist; + expect(connectionCheckoutTimeout).to.equal(serverSelectionTimeout); + }); + }); + }); + }); + + describe('LegacyTimeoutContext', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + describe('get serverSelectionTimeout()', function () { + context('when serverSelectionTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 100, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.serverSelectionTimeoutMS); + }); + }); + + context('when serverSelectionTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.null; + }); + }); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when waitQueueTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to waitQueueTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + 
waitQueueTimeoutMS: 20 + }); + timeout = ctx.connectionCheckoutTimeout; + + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.waitQueueTimeoutMS); + }); + }); + + context('when waitQueueTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + waitQueueTimeoutMS: 0 + }); + + expect(ctx.connectionCheckoutTimeout).to.be.null; + }); + }); + }); + }); +}); From 398066ea27ea343df76c2c197094fdef8d82337f Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 22 Jul 2024 11:17:22 -0400 Subject: [PATCH 094/136] refactor(NODE-6230): executeOperation to use iterative retry mechanism (#4157) --- src/cmap/connection_pool.ts | 6 ++++-- src/operations/execute_operation.ts | 27 ++++++++++++++++----------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5369cc155aa..2cd2bcc2c19 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -28,7 +28,7 @@ import { import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; import { type TimeoutContext, TimeoutError } from '../timeout'; -import { type Callback, List, makeCounter, promiseWithResolvers } from '../utils'; +import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; import { @@ -356,6 +356,7 @@ export class ConnectionPool extends TypedEventEmitter { * explicitly destroyed by the new owner. */ async checkOut(options: { timeoutContext: TimeoutContext }): Promise { + const checkoutTime = now(); this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) @@ -367,7 +368,8 @@ export class ConnectionPool extends TypedEventEmitter { const waitQueueMember: WaitQueueMember = { resolve, - reject + reject, + checkoutTime }; this[kWaitQueue].push(waitQueueMember); diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 0cffa0c35f7..15cad8c32a7 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -25,7 +25,7 @@ import { import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; import { TimeoutContext } from '../timeout'; -import { squashError, supportsRetryableWrites } from '../utils'; +import { supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -87,12 +87,6 @@ export async function executeOperation< ); } - timeoutContext ??= TimeoutContext.create({ - serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, - waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, - timeoutMS: operation.options.timeoutMS - }); - const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -112,12 +106,18 @@ export async function executeOperation< session.unpin(); } + timeoutContext ??= TimeoutContext.create({ + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + try { return await tryOperation(operation, { topology, + timeoutContext, session, - readPreference, - timeoutContext + readPreference }); } finally { if (session?.owner != null && session.owner === owner) { @@ -156,6 +156,7 @@ type RetryOptions = { session: ClientSession | undefined; readPreference: ReadPreference; topology: Topology; + timeoutContext: TimeoutContext; }; /** @@ -179,7 +180,10 @@ type RetryOptions = { async function tryOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(operation: T, { topology, session, readPreference }: RetryOptions): Promise { +>( + operation: T, + { topology, timeoutContext, session, readPreference }: RetryOptions +): Promise { let selector: ReadPreference | ServerSelector; if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) { @@ -197,7 +201,8 @@ async function tryOperation< let server = await topology.selectServer(selector, { session, - operationName: operation.commandName + operationName: operation.commandName, + timeoutContext }); const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION); From c3337237e72b4e0064d7a8b5abe5af65461e2cdf Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 26 Jul 2024 09:55:20 -0400 Subject: [PATCH 095/136] feat(NODE-5682): set maxTimeMS on commands and preempt I/O (#4174) Co-authored-by: Warren James --- src/admin.ts | 5 +- src/cmap/connection.ts | 66 ++++++++++++++++--- src/cmap/wire_protocol/on_data.ts | 17 ++++- src/db.ts | 2 +- src/sdam/topology.ts | 17 +++-- src/timeout.ts | 43 ++++++++++-- ...ient_side_operations_timeout.prose.test.ts | 20 +++--- ...lient_side_operations_timeout.spec.test.ts | 33 +++++++++- .../node_csot.test.ts | 1 - test/integration/node-specific/db.test.js | 22 ++----- test/spec/{index.js => index.ts} | 19 ++---- test/tools/cmap_spec_runner.ts | 3 +- test/tools/unified-spec-runner/entities.ts | 4 +- test/tools/unified-spec-runner/match.ts | 15 ++++- test/tools/unified-spec-runner/operations.ts | 8 +-- test/unit/tools/unified_spec_runner.test.ts | 2 +- 16 files changed, 200 insertions(+), 77 deletions(-) rename test/spec/{index.js => index.ts} (67%) diff --git a/src/admin.ts b/src/admin.ts index e030384eafc..0f03023a95c 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -155,7 +155,10 @@ export class Admin { * @param options - Optional settings for the command */ async listDatabases(options?: ListDatabasesOptions): Promise { - return await executeOperation(this.s.db.client, new ListDatabasesOperation(this.s.db, options)); + return await executeOperation( + this.s.db.client, + new ListDatabasesOperation(this.s.db, { timeoutMS: this.s.db.timeoutMS, ...options }) + ); } /** diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index f7bb1789b7c..010bcb8c897 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -19,6 +19,7 @@ import { MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, MongoServerError, MongoUnexpectedServerResponseError @@ -30,7 +31,7 @@ import { type CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from 
'../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; -import { type TimeoutContext } from '../timeout'; +import { type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -419,6 +420,11 @@ export class Connection extends TypedEventEmitter { ...options }; + if (options.timeoutContext?.csotEnabled()) { + const { maxTimeMS } = options.timeoutContext; + if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + } + const message = this.supportsOpMsg ? new OpMsgRequest(db, cmd, commandOptions) : new OpQueryRequest(db, cmd, commandOptions); @@ -433,7 +439,9 @@ export class Connection extends TypedEventEmitter { ): AsyncGenerator { this.throwIfAborted(); - if (typeof options.socketTimeoutMS === 'number') { + if (options.timeoutContext?.csotEnabled()) { + this.socket.setTimeout(0); + } else if (typeof options.socketTimeoutMS === 'number') { this.socket.setTimeout(options.socketTimeoutMS); } else if (this.socketTimeoutMS !== 0) { this.socket.setTimeout(this.socketTimeoutMS); @@ -442,7 +450,8 @@ export class Connection extends TypedEventEmitter { try { await this.writeCommand(message, { agreedCompressor: this.description.compressor ?? 'none', - zlibCompressionLevel: this.description.zlibCompressionLevel + zlibCompressionLevel: this.description.zlibCompressionLevel, + timeoutContext: options.timeoutContext }); if (options.noResponse || message.moreToCome) { @@ -452,7 +461,17 @@ export class Connection extends TypedEventEmitter { this.throwIfAborted(); - for await (const response of this.readMany()) { + if ( + options.timeoutContext?.csotEnabled() && + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + + for await (const response of this.readMany({ timeoutContext: options.timeoutContext })) { this.socket.setTimeout(0); const bson = response.parse(); @@ -629,7 +648,11 @@ export class Connection extends TypedEventEmitter { */ private async writeCommand( command: WriteProtocolMessageType, - options: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number } + options: { + agreedCompressor?: CompressorName; + zlibCompressionLevel?: number; + timeoutContext?: TimeoutContext; + } ): Promise { const finalCommand = options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command) @@ -641,8 +664,32 @@ export class Connection extends TypedEventEmitter { const buffer = Buffer.concat(await finalCommand.toBin()); + if (options.timeoutContext?.csotEnabled()) { + if ( + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + } + if (this.socket.write(buffer)) return; - return await once(this.socket, 'drain'); + + const drainEvent = once(this.socket, 'drain'); + const timeout = options?.timeoutContext?.timeoutForSocketWrite; + if (timeout) { + try { + return await Promise.race([drainEvent, timeout]); + } catch (error) { + if (TimeoutError.is(error)) { + throw new MongoOperationTimeoutError('Timed out at socket write'); + } + throw error; + } + } + return await drainEvent; } /** @@ -654,10 +701,13 @@ export class Connection extends 
TypedEventEmitter { * * Note that `for-await` loops call `return` automatically when the loop is exited. */ - private async *readMany(): AsyncGenerator { + private async *readMany(options: { + timeoutContext?: TimeoutContext; + }): AsyncGenerator { try { - this.dataEvents = onData(this.messageStream); + this.dataEvents = onData(this.messageStream, options); this.messageStream.resume(); + for await (const message of this.dataEvents) { const response = await decompressResponse(message); yield response; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index b99c950d96f..a32c6b1b484 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,5 +1,7 @@ import { type EventEmitter } from 'events'; +import { MongoOperationTimeoutError } from '../../error'; +import { type TimeoutContext, TimeoutError } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -18,7 +20,10 @@ type PendingPromises = Omit< * Returns an AsyncIterator that iterates each 'data' event emitted from emitter. * It will reject upon an error event. */ -export function onData(emitter: EventEmitter) { +export function onData( + emitter: EventEmitter, + { timeoutContext }: { timeoutContext?: TimeoutContext } +) { // Setup pending events and pending promise lists /** * When the caller has not yet called .next(), we store the @@ -86,6 +91,8 @@ export function onData(emitter: EventEmitter) { // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + // eslint-disable-next-line github/no-then + timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -97,8 +104,12 @@ export function onData(emitter: EventEmitter) { function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - if (promise != null) promise.reject(err); - else error = err; + const timeoutError = TimeoutError.is(err) + ? new MongoOperationTimeoutError('Timed out during socket read') + : undefined; + + if (promise != null) promise.reject(timeoutError ?? err); + else error = timeoutError ?? err; void closeHandler(); } diff --git a/src/db.ts b/src/db.ts index 6e1aa194acf..48501bc497e 100644 --- a/src/db.ts +++ b/src/db.ts @@ -277,7 +277,7 @@ export class Db { this.client, new RunCommandOperation(this, command, { ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS, + timeoutMS: options?.timeoutMS ?? this.timeoutMS, session: options?.session, readPreference: options?.readPreference }) diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 6117b5317cd..479003f0e35 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -460,29 +460,28 @@ export class Topology extends TypedEventEmitter { } } - const timeoutMS = this.client.s.options.timeoutMS; + // TODO(NODE-6223): auto connect cannot use timeoutMS + // const timeoutMS = this.client.s.options.timeoutMS; const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const timeoutContext = TimeoutContext.create({ - timeoutMS, + timeoutMS: undefined, serverSelectionTimeoutMS, waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS }); - const selectServerOptions = { operationName: 'ping', ...options, timeoutContext }; + try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), selectServerOptions ); - const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; - if (!skipPingOnConnect && server && this.s.credentials) { + if (!skipPingOnConnect && this.s.credentials) { await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); @@ -623,7 +622,11 @@ export class Topology extends TypedEventEmitter { try { timeout?.throwIfExpired(); - return await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + const server = await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + if (options.timeoutContext?.csotEnabled() && server.description.minRoundTripTime !== 0) { + options.timeoutContext.minRoundTripTime = server.description.minRoundTripTime; + } + return server; } catch (error) { if (TimeoutError.is(error)) { // Timeout diff --git a/src/timeout.ts b/src/timeout.ts index 3d65992a02b..cc90b8c2e72 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,6 +1,6 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError, MongoRuntimeError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; import { csotMin, noop } from './utils'; /** @internal */ @@ -51,7 +51,7 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = false) { + private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; if (duration < 0) { @@ -163,6 +163,10 @@ export abstract class TimeoutContext { abstract get clearConnectionCheckoutTimeout(): boolean; + abstract get timeoutForSocketWrite(): Timeout | null; + + abstract get timeoutForSocketRead(): Timeout | null; + abstract csotEnabled(): this is CSOTTimeoutContext; } @@ -175,13 +179,15 @@ export class CSOTTimeoutContext extends TimeoutContext { clearConnectionCheckoutTimeout: boolean; clearServerSelectionTimeout: boolean; - private _maxTimeMS?: number; - private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; + public minRoundTripTime = 0; + private start: number; constructor(options: CSOTTimeoutContextOptions) { super(); + this.start = Math.trunc(performance.now()); + this.timeoutMS = options.timeoutMS; this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; @@ -193,11 +199,12 @@ export class CSOTTimeoutContext extends TimeoutContext { } get maxTimeMS(): number { - return this._maxTimeMS ?? -1; + return this.remainingTimeMS - this.minRoundTripTime; } - set maxTimeMS(v: number) { - this._maxTimeMS = v; + get remainingTimeMS() { + const timePassed = Math.trunc(performance.now()) - this.start; + return this.timeoutMS <= 0 ? 
Infinity : this.timeoutMS - timePassed; } csotEnabled(): this is CSOTTimeoutContext { @@ -238,6 +245,20 @@ export class CSOTTimeoutContext extends TimeoutContext { } return this._connectionCheckoutTimeout; } + + get timeoutForSocketWrite(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket write'); + } + + get timeoutForSocketRead(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + throw new MongoOperationTimeoutError('Timed out before socket read'); + } } /** @internal */ @@ -268,4 +289,12 @@ export class LegacyTimeoutContext extends TimeoutContext { return Timeout.expires(this.options.waitQueueTimeoutMS); return null; } + + get timeoutForSocketWrite(): Timeout | null { + return null; + } + + get timeoutForSocketRead(): Timeout | null { + return null; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 903ea9c3bb4..729bed42199 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -384,7 +384,7 @@ describe('CSOT spec prose tests', function () { clock.restore(); }); - it('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { + it.skip('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. @@ -416,10 +416,11 @@ describe('CSOT spec prose tests', function () { await clock.tickAsync(11); expect(await maybeError).to.be.instanceof(MongoServerSelectionError); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); - it("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -440,9 +441,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. 
@@ -462,9 +464,10 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - it('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { + it.skip('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. @@ -484,7 +487,8 @@ describe('CSOT spec prose tests', function () { expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); expect(end - start).to.be.lte(15); - }); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 2e2cd0fa8e5..f73f162204f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -3,7 +3,34 @@ import { join } from 'path'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -// TODO(NODE-5823): Implement unified runner operations and options support for CSOT -describe.skip('CSOT spec tests', function () { - runUnifiedSuite(loadSpecTests(join('client-side-operations-timeout'))); +const enabled = [ + 'override-collection-timeoutMS', + 'override-database-timeoutMS', + 'override-operation-timeoutMS' +]; + +const cursorOperations = [ + 'aggregate', + 'countDocuments', + 'listIndexes', + 'createChangeStream', + 'listCollections', + 'listCollectionNames' +]; + +describe('CSOT spec tests', function () { + const specs = loadSpecTests(join('client-side-operations-timeout')); + for (const spec of specs) { + for (const test of spec.tests) { + // not one of the test suites listed in kickoff + if (!enabled.includes(spec.name)) { + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + + // Cursor operation + if (test.operations.find(operation => cursorOperations.includes(operation.name))) + test.skipReason = 'TODO(NODE-5684): Not working yet'; + } + } + runUnifiedSuite(specs); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 17d85ba5b23..0c97b910836 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -48,7 +48,6 @@ describe('CSOT driver tests', () => { afterEach(async () => { await cursor?.close(); await session?.endSession(); - await session.endSession(); }); it('throws an error', async () => { diff --git a/test/integration/node-specific/db.test.js b/test/integration/node-specific/db.test.js index 338e136c12c..a092a8d888b 100644 --- a/test/integration/node-specific/db.test.js +++ b/test/integration/node-specific/db.test.js @@ -45,22 +45,12 @@ 
describe('Db', function () { }); }); - it('shouldCorrectlyHandleFailedConnection', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var configuration = this.configuration; - var fs_client = configuration.newClient('mongodb://127.0.0.1:25117/test', { - serverSelectionTimeoutMS: 10 - }); - - fs_client.connect(function (err) { - test.ok(err != null); - done(); - }); - } + it('should correctly handle failed connection', async function () { + const client = this.configuration.newClient('mongodb://iLoveJS', { + serverSelectionTimeoutMS: 10 + }); + const error = await client.connect().catch(error => error); + expect(error).to.be.instanceOf(Error); }); it('shouldCorrectlyGetErrorDroppingNonExistingDb', { diff --git a/test/spec/index.js b/test/spec/index.ts similarity index 67% rename from test/spec/index.js rename to test/spec/index.ts index f9e6dccf02f..221d6671893 100644 --- a/test/spec/index.js +++ b/test/spec/index.ts @@ -1,7 +1,7 @@ -'use strict'; -const path = require('path'); -const fs = require('fs'); -const { EJSON } = require('bson'); +import * as fs from 'fs'; +import * as path from 'path'; + +import { EJSON } from '../mongodb'; function hasDuplicates(testArray) { const testNames = testArray.map(test => test.description); @@ -12,17 +12,16 @@ function hasDuplicates(testArray) { /** * Given spec test folder names, loads the corresponding JSON * - * @param {...string} args - the spec test name to load - * @returns {any[]} + * @param args - the spec test name to load */ -function loadSpecTests(...args) { +export function loadSpecTests(...args: string[]): any[] { const specPath = path.resolve(...[__dirname].concat(args)); const suites = fs .readdirSync(specPath) .filter(x => x.includes('.json')) .map(x => ({ - ...EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), + ...EJSON.parse(fs.readFileSync(path.join(specPath, x), 'utf8'), { relaxed: true }), name: path.basename(x, '.json') })); @@ -36,7 +35,3 @@ function loadSpecTests(...args) { return suites; } - -module.exports = { - loadSpecTests -}; diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index 9bb2abdb87a..892f6311df5 100644 --- a/test/tools/cmap_spec_runner.ts +++ b/test/tools/cmap_spec_runner.ts @@ -1,6 +1,7 @@ import { expect } from 'chai'; import { EventEmitter } from 'events'; import { clearTimeout, setTimeout } from 'timers'; +import { inspect } from 'util'; import { addContainerMetadata, @@ -427,7 +428,7 @@ async function runCmapTest(test: CmapTest, threadContext: ThreadContext) { } compareInputToSpec(actualError, errorPropsToCheck, `failed while checking ${errorType}`); } else { - expect(actualError).to.not.exist; + expect(actualError, inspect(actualError)).to.not.exist; } const actualEvents = threadContext.poolEvents.filter( diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 65b5242cf06..9f4e20a828e 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -44,7 +44,7 @@ import { type TopologyOpeningEvent, WriteConcern } from '../../mongodb'; -import { ejson, getEnvironmentalOptions } from '../../tools/utils'; +import { getEnvironmentalOptions } from '../../tools/utils'; import type { TestConfiguration } from '../runner/config'; import { EntityEventRegistry } from './entity_event_registry'; import { trace } from './runner'; @@ -590,7 +590,7 @@ export class EntitiesMap extends Map { new EntityEventRegistry(client, 
entity.client, map).register(); await client.connect(); } catch (error) { - console.error(ejson`failed to connect entity ${entity}`); + console.error('failed to connect entity', entity); // In the case where multiple clients are defined in the test and any one of them failed // to connect, but others did succeed, we need to ensure all open clients are closed. const clients = map.mapOf('client'); diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 4d37fce9ac8..5eb3af88759 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -174,7 +174,8 @@ TYPE_MAP.set('minKey', actual => actual._bsontype === 'MinKey'); TYPE_MAP.set('maxKey', actual => actual._bsontype === 'MaxKey'); TYPE_MAP.set( 'int', - actual => (typeof actual === 'number' && Number.isInteger(actual)) || actual._bsontype === 'Int32' + actual => + (typeof actual === 'number' && Number.isInteger(actual)) || actual?._bsontype === 'Int32' ); TYPE_MAP.set( 'long', @@ -219,6 +220,10 @@ export function resultCheck( resultCheck(objFromActual, value, entities, path, checkExtraKeys); } else if (key === 'createIndexes') { for (const [i, userIndex] of actual.indexes.entries()) { + if (expected?.indexes?.[i]?.key == null) { + // The expectation does not include an assertion for the index key + continue; + } expect(expected).to.have.nested.property(`.indexes[${i}].key`).to.be.a('object'); // @ts-expect-error: Not worth narrowing to a document expect(Object.keys(expected.indexes[i].key)).to.have.lengthOf(1); @@ -372,7 +377,7 @@ export function specialCheck( for (const type of types) { ok ||= TYPE_MAP.get(type)(actual); } - expect(ok, `Expected [${actual}] to be one of [${types}]`).to.be.true; + expect(ok, `Expected ${path.join('.')} [${actual}] to be one of [${types}]`).to.be.true; } else if (isExistsOperator(expected)) { // $$exists const actualExists = actual !== undefined && actual !== null; @@ -785,6 +790,12 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + if (expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 9cc67174f3c..7a98c7ac978 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -303,6 +303,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { if (!/ns not found/.test(err.message)) { throw err; } + return false; } }); @@ -313,7 +314,7 @@ operations.set('drop', async ({ entities, operation }) => { operations.set('dropIndexes', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - return collection.dropIndexes(); + return collection.dropIndexes(operation.arguments); }); operations.set('endSession', async ({ entities, operation }) => { @@ -767,11 +768,10 @@ operations.set('runCommand', async ({ entities, operation }: OperationFunctionPa throw new AssertionError('runCommand requires a command'); const { command } = operation.arguments; - if (operation.arguments.timeoutMS != null) throw new AssertionError('timeoutMS not supported'); - 
const options = { readPreference: operation.arguments.readPreference, - session: operation.arguments.session + session: operation.arguments.session, + timeoutMS: operation.arguments.timeoutMS }; return db.command(command, options); diff --git a/test/unit/tools/unified_spec_runner.test.ts b/test/unit/tools/unified_spec_runner.test.ts index a0887be9593..7ebee168590 100644 --- a/test/unit/tools/unified_spec_runner.test.ts +++ b/test/unit/tools/unified_spec_runner.test.ts @@ -100,7 +100,7 @@ describe('Unified Spec Runner', function () { expect(() => resultCheckSpy(actual, expected, entitiesMap, [])).to.throw( AssertionError, - /Expected \[string\] to be one of \[int\]/ + /\[string\] to be one of \[int\]/ ); }); }); From 256ca4e67b4725a36cf279a82189f598d9a42862 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 1 Aug 2024 16:08:39 -0400 Subject: [PATCH 096/136] feat(NODE-6231): Add CSOT behaviour for retryable reads and writes (#4186) --- src/operations/execute_operation.ts | 9 ++++--- src/timeout.ts | 26 ++++++++++++------- ...lient_side_operations_timeout.spec.test.ts | 13 +++++++++- ...lient_side_operations_timeout.unit.test.ts | 10 +++++-- .../node_csot.test.ts | 5 ---- test/tools/unified-spec-runner/match.ts | 2 ++ 6 files changed, 44 insertions(+), 21 deletions(-) diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 15cad8c32a7..cdddc1211a8 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -227,12 +227,10 @@ async function tryOperation< session.incrementTransactionNumber(); } - // TODO(NODE-6231): implement infinite retry within CSOT timeout here - const maxTries = willRetry ? 2 : 1; + const maxTries = willRetry ? (timeoutContext.csotEnabled() ? Infinity : 2) : 1; let previousOperationError: MongoError | undefined; let previousServer: ServerDescription | undefined; - // TODO(NODE-6231): implement infinite retry within CSOT timeout here for (let tries = 0; tries < maxTries; tries++) { if (previousOperationError) { if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) { @@ -284,7 +282,6 @@ async function tryOperation< return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; - if ( previousOperationError != null && operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed) @@ -293,6 +290,10 @@ async function tryOperation< } previousServer = server.description; previousOperationError = operationError; + + // Reset timeouts + timeoutContext.serverSelectionTimeout?.clear(); + timeoutContext.connectionCheckoutTimeout?.clear(); } } diff --git a/src/timeout.ts b/src/timeout.ts index cc90b8c2e72..297a484b4ec 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -39,6 +39,7 @@ export class Timeout extends Promise { public ended: number | null = null; public duration: number; public timedOut = false; + public cleared = false; get remainingTime(): number { if (this.timedOut) return 0; @@ -53,7 +54,6 @@ export class Timeout extends Promise { /** Create a new timeout that expires in `duration` ms */ private constructor(executor: Executor = () => null, duration: number, unref = true) { let reject!: Reject; - if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } @@ -86,6 +86,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.cleared = true; } throwIfExpired(): void { @@ 
-213,16 +214,20 @@ export class CSOTTimeoutContext extends TimeoutContext { get serverSelectionTimeout(): Timeout | null { // check for undefined - if (typeof this._serverSelectionTimeout !== 'object') { + if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { + const { remainingTimeMS, serverSelectionTimeoutMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError( + `Timed out in server selection after ${this.timeoutMS}ms` + ); const usingServerSelectionTimeoutMS = - this.serverSelectionTimeoutMS !== 0 && - csotMin(this.timeoutMS, this.serverSelectionTimeoutMS) === this.serverSelectionTimeoutMS; - + serverSelectionTimeoutMS !== 0 && + csotMin(remainingTimeMS, serverSelectionTimeoutMS) === serverSelectionTimeoutMS; if (usingServerSelectionTimeoutMS) { - this._serverSelectionTimeout = Timeout.expires(this.serverSelectionTimeoutMS); + this._serverSelectionTimeout = Timeout.expires(serverSelectionTimeoutMS); } else { - if (this.timeoutMS > 0) { - this._serverSelectionTimeout = Timeout.expires(this.timeoutMS); + if (remainingTimeMS > 0 && Number.isFinite(remainingTimeMS)) { + this._serverSelectionTimeout = Timeout.expires(remainingTimeMS); } else { this._serverSelectionTimeout = null; } @@ -233,7 +238,10 @@ export class CSOTTimeoutContext extends TimeoutContext { } get connectionCheckoutTimeout(): Timeout | null { - if (typeof this._connectionCheckoutTimeout !== 'object') { + if ( + typeof this._connectionCheckoutTimeout !== 'object' || + this._connectionCheckoutTimeout?.cleared + ) { if (typeof this._serverSelectionTimeout === 'object') { // null or Timeout this._connectionCheckoutTimeout = this._serverSelectionTimeout; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index f73f162204f..e4c9eb3027c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -6,7 +6,9 @@ import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const enabled = [ 'override-collection-timeoutMS', 'override-database-timeoutMS', - 'override-operation-timeoutMS' + 'override-operation-timeoutMS', + 'retryability-legacy-timeouts', + 'retryability-timeoutMS' ]; const cursorOperations = [ @@ -18,6 +20,11 @@ const cursorOperations = [ 'listCollectionNames' ]; +const bulkWriteOperations = [ + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' +]; + describe('CSOT spec tests', function () { const specs = loadSpecTests(join('client-side-operations-timeout')); for (const spec of specs) { @@ -30,6 +37,10 @@ describe('CSOT spec tests', function () { // Cursor operation if (test.operations.find(operation => cursorOperations.includes(operation.name))) test.skipReason = 'TODO(NODE-5684): Not working yet'; + + if (bulkWriteOperations.includes(test.description)) + test.skipReason = + 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } runUnifiedSuite(specs); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 
c4989f58d7f..944d9b96048 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -7,7 +7,7 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { ConnectionPool, type MongoClient, Timeout, Topology } from '../../mongodb'; +import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -22,10 +22,16 @@ describe('CSOT spec unit tests', function () { it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); sinon.spy(Timeout, 'expires'); + const timeoutContextSpy = sinon.spy(TimeoutContext, 'create'); await client.db('db').collection('collection').insertOne({ x: 1 }); - expect(Timeout.expires).to.have.been.calledWith(10000); + const createCalls = timeoutContextSpy.getCalls().filter( + // @ts-expect-error accessing concrete field + call => call.args[0].timeoutMS === 10000 + ); + + expect(createCalls).to.have.length.greaterThanOrEqual(1); expect(Timeout.expires).to.not.have.been.calledWith(999999); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 0c97b910836..63e2d97dd90 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,6 +1,5 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; -import * as sinon from 'sinon'; import { type ClientSession, @@ -13,10 +12,6 @@ import { } from '../../mongodb'; describe('CSOT driver tests', () => { - afterEach(() => { - sinon.restore(); - }); - describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 5eb3af88759..ebcd2cfdd85 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -790,6 +790,8 @@ export function expectErrorCheck( expect(error).to.be.instanceof(MongoOperationTimeoutError); } + // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their + // errorResponse field if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { From 8a416bedca7826c687fb1fa515b96b8a8f2fa38b Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 12 Aug 2024 16:46:06 -0400 Subject: [PATCH 097/136] feat(NODE-6312): add error transformation for server timeouts (#4192) --- src/cmap/connection.ts | 29 ++++ src/cmap/wire_protocol/responses.ts | 36 +++- .../node_csot.test.ts | 163 +++++++++++++++++- 3 files changed, 225 insertions(+), 3 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 010bcb8c897..ecc5ca9c0c7 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -16,6 +16,7 @@ import { } from '../constants'; import { MongoCompatibilityError, + MONGODB_ERROR_CODES, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, @@ -540,6 +541,11 @@ export class Connection extends TypedEventEmitter { } if (document.ok === 0) { + if 
(options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError((object ??= document.toObject(bsonOptions))) + }); + } throw new MongoServerError((object ??= document.toObject(bsonOptions))); } @@ -613,6 +619,29 @@ export class Connection extends TypedEventEmitter { ): Promise { this.throwIfAborted(); for await (const document of this.sendCommand(ns, command, options, responseType)) { + if (options.timeoutContext?.csotEnabled()) { + if (MongoDBResponse.is(document)) { + // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT + if (document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document.toObject()) + }); + } + } else { + if ( + (Array.isArray(document?.writeErrors) && + document.writeErrors.some( + error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + )) || + document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + ) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document) + }); + } + } + } + return document; } throw new MongoUnexpectedServerResponseError('Unable to get response from server'); diff --git a/src/cmap/wire_protocol/responses.ts b/src/cmap/wire_protocol/responses.ts index 18afde92e72..a56016cf578 100644 --- a/src/cmap/wire_protocol/responses.ts +++ b/src/cmap/wire_protocol/responses.ts @@ -11,7 +11,7 @@ import { pluckBSONSerializeOptions, type Timestamp } from '../../bson'; -import { MongoUnexpectedServerResponseError } from '../../error'; +import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error'; import { type ClusterTime } from '../../sdam/common'; import { decorateDecryptionResult, ns } from '../../utils'; import { @@ -111,6 +111,40 @@ export class MongoDBResponse extends OnDemandDocument { // {ok:1} static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0])); + /** + * Returns true iff: + * - ok is 0 and the top-level code === 50 + * - ok is 1 and the writeErrors array contains a code === 50 + * - ok is 1 and the writeConcern object contains a code === 50 + */ + get isMaxTimeExpiredError() { + // {ok: 0, code: 50 ... } + const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isTopLevel) return true; + + if (this.ok === 0) return false; + + // {ok: 1, writeConcernError: {code: 50 ... }} + const isWriteConcern = + this.get('writeConcernError', BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isWriteConcern) return true; + + const writeErrors = this.get('writeErrors', BSONType.array); + if (writeErrors?.size()) { + for (let i = 0; i < writeErrors.size(); i++) { + const isWriteError = + writeErrors.get(i, BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + + // {ok: 1, writeErrors: [{code: 50 ... }]} + if (isWriteError) return true; + } + } + + return false; + } + /** * Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the * contents of the document. 
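As a rough sketch of the three reply shapes that the isMaxTimeExpiredError getter above recognizes, the following standalone TypeScript applies the same code-50 checks to a plain, fully deserialized reply document. The names ServerReply, MAX_TIME_MS_EXPIRED, and isMaxTimeExpired are hypothetical illustration names, not driver APIs; they only assume that MaxTimeMSExpired is server error code 50, as the patch does.

// Illustrative sketch only (not part of the patch): mirrors the isMaxTimeExpiredError
// checks on a plain object instead of a lazily parsed BSON response.
const MAX_TIME_MS_EXPIRED = 50;

interface ServerReply {
  ok: 0 | 1;
  code?: number;
  writeConcernError?: { code?: number };
  writeErrors?: Array<{ code?: number }>;
}

function isMaxTimeExpired(reply: ServerReply): boolean {
  // {ok: 0, code: 50, ...} — top-level command failure
  if (reply.ok === 0) return reply.code === MAX_TIME_MS_EXPIRED;

  // {ok: 1, writeConcernError: {code: 50, ...}}
  if (reply.writeConcernError?.code === MAX_TIME_MS_EXPIRED) return true;

  // {ok: 1, writeErrors: [..., {code: 50, ...}, ...]} — any entry counts
  return reply.writeErrors?.some(e => e.code === MAX_TIME_MS_EXPIRED) ?? false;
}

// All three shapes report as a server-side timeout; an ordinary ok reply does not.
console.log(isMaxTimeExpired({ ok: 0, code: 50 })); // true
console.log(isMaxTimeExpired({ ok: 1, writeConcernError: { code: 50 } })); // true
console.log(isMaxTimeExpired({ ok: 1, writeErrors: [{ code: 11000 }, { code: 50 }] })); // true
console.log(isMaxTimeExpired({ ok: 1 })); // false

The getter in the patch performs the same classification but reads fields lazily from the raw BSON via OnDemandDocument (get('writeConcernError', BSONType.object)?.getNumber('code'), and so on), so the reply never has to be fully deserialized just to decide whether to wrap it in a MongoOperationTimeoutError.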
diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 63e2d97dd90..d7d4a4ede5a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,17 +1,23 @@ /* Anything javascript specific relating to timeouts */ import { expect } from 'chai'; +import * as semver from 'semver'; +import * as sinon from 'sinon'; import { + BSON, type ClientSession, type Collection, + Connection, type Db, type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, - MongoOperationTimeoutError + MongoOperationTimeoutError, + MongoServerError } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', () => { +describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -161,4 +167,157 @@ describe('CSOT driver tests', () => { }); }); }); + + describe('server-side maxTimeMS errors are transformed', () => { + let client: MongoClient; + let commandsSucceeded; + let commandsFailed; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); + commandsSucceeded = []; + commandsFailed = []; + client.on('commandSucceeded', event => { + if (event.commandName === 'configureFailPoint') return; + commandsSucceeded.push(event); + }); + client.on('commandFailed', event => commandsFailed.push(event)); + }); + + afterEach(async function () { + await client + .db() + .collection('a') + .drop() + .catch(() => null); + await client.close(); + commandsSucceeded = undefined; + commandsFailed = undefined; + }); + + describe('when a maxTimeExpired error is returned at the top-level', () => { + // {ok: 0, code: 50, codeName: "MaxTimeMSExpired", errmsg: "operation time limit exceeded"} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['ping'], + errorCode: 50 + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command failed', async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { + // The server should always return one maxTimeExpiredError at the front of the writeErrors array + // But for the sake of defensive programming we will find any maxTime error in the array. 
+ + beforeEach(async () => { + const writeErrorsReply = BSON.serialize({ + ok: 1, + writeErrors: [ + { code: 2, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 3, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 4, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 50, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' } + ] + }); + const commandSpy = sinon.spy(Connection.prototype, 'command'); + const readManyStub = sinon + // @ts-expect-error: readMany is private + .stub(Connection.prototype, 'readMany') + .callsFake(async function* (...args) { + const realIterator = readManyStub.wrappedMethod.call(this, ...args); + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + }); + }); + + afterEach(() => sinon.restore()); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + }); + }); + + describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { + // {ok: 1, writeConcernError: {code: 50, codeName: "MaxTimeMSExpired"}} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + writeConcernError: { code: 50, errmsg: 'times up buster', errorLabels: [] } + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + }); + }); + }); }); From 52c2c9da1a474ad7fe11b6f9633e218b3f3bb482 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Mon, 9 Sep 2024 11:11:11 -0400 Subject: [PATCH 098/136] feat(NODE-6313): add CSOT support to sessions and transactions (#4199) --- package-lock.json | 82 +++--- package.json | 2 +- src/cmap/connection.ts | 7 + src/cmap/wire_protocol/on_data.ts | 15 +- src/collection.ts | 12 +- src/db.ts | 22 +- src/error.ts | 3 + src/operations/execute_operation.ts | 8 +- src/sessions.ts | 255 ++++++++++++------ src/timeout.ts | 49 +++- src/transactions.ts | 7 +- src/utils.ts | 13 +- 
...ient_side_operations_timeout.prose.test.ts | 167 +++++++++++- ...lient_side_operations_timeout.spec.test.ts | 18 +- .../node_csot.test.ts | 150 +++++++++++ .../sessions-inherit-timeoutMS.json | 28 +- .../sessions-inherit-timeoutMS.yml | 19 +- ...sessions-override-operation-timeoutMS.json | 32 ++- .../sessions-override-operation-timeoutMS.yml | 23 +- .../sessions-override-timeoutMS.json | 28 +- .../sessions-override-timeoutMS.yml | 19 +- test/tools/unified-spec-runner/entities.ts | 4 + test/tools/unified-spec-runner/match.ts | 19 +- test/tools/unified-spec-runner/operations.ts | 27 +- 24 files changed, 776 insertions(+), 233 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2b07cd361d5..1d9cebf509b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6415,10 +6415,46 @@ "node": ">=10" } }, - "node_modules/mongodb": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.8.0.tgz", - "integrity": "sha512-HGQ9NWDle5WvwMnrvUxsFYPd3JEbqD3RgABHBQRuoCEND0qzhsd0iH5ypHsf1eJ+sXmvmyKpP+FLOKY8Il7jMw==", + "node_modules/mongodb-client-encryption": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", + "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "bindings": "^1.5.0", + "node-addon-api": "^4.3.0", + "prebuild-install": "^7.1.2" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-connection-string-url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", + "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", + "dependencies": { + "@types/whatwg-url": "^11.0.2", + "whatwg-url": "^13.0.0" + } + }, + "node_modules/mongodb-legacy": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", + "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "dev": true, + "dependencies": { + "mongodb": "^6.0.0" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-legacy/node_modules/mongodb": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.7.0.tgz", + "integrity": "sha512-TMKyHdtMcO0fYBNORiYdmM25ijsHs+Njs963r4Tro4OQZzqYigAzYQouwWRg4OIaiLRUEGUh/1UAcH5lxdSLIA==", "dev": true, "dependencies": { "@mongodb-js/saslprep": "^1.1.5", @@ -6461,42 +6497,6 @@ } } }, - "node_modules/mongodb-client-encryption": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", - "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "node-addon-api": "^4.3.0", - "prebuild-install": "^7.1.2" - }, - "engines": { - "node": ">=16.20.1" - } - }, - "node_modules/mongodb-connection-string-url": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", - "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", - "dependencies": { - "@types/whatwg-url": "^11.0.2", - "whatwg-url": "^13.0.0" - } - }, - "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", - "dev": true, - "dependencies": { - "mongodb": "^6.0.0" - }, - "engines": { - "node": ">=16.20.1" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", diff --git a/package.json b/package.json index 479356905dc..2de0e1811f0 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.1", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index ecc5ca9c0c7..7ad367e6733 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -745,6 +745,13 @@ export class Connection extends TypedEventEmitter { return; } } + } catch (readError) { + if (TimeoutError.is(readError)) { + throw new MongoOperationTimeoutError( + `Timed out during socket read (${readError.duration}ms)` + ); + } + throw readError; } finally { this.dataEvents = null; this.messageStream.pause(); diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index a32c6b1b484..23fd88e2828 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,7 +1,6 @@ import { type EventEmitter } from 'events'; -import { MongoOperationTimeoutError } from '../../error'; -import { type TimeoutContext, TimeoutError } from '../../timeout'; +import { type TimeoutContext } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -91,8 +90,11 @@ export function onData( // Adding event handlers emitter.on('data', eventHandler); emitter.on('error', errorHandler); + + const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; + timeoutForSocketRead?.throwIfExpired(); // eslint-disable-next-line github/no-then - timeoutContext?.timeoutForSocketRead?.then(undefined, errorHandler); + timeoutForSocketRead?.then(undefined, errorHandler); return iterator; @@ -104,12 +106,9 @@ export function onData( function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); - const timeoutError = TimeoutError.is(err) - ? new MongoOperationTimeoutError('Timed out during socket read') - : undefined; - if (promise != null) promise.reject(timeoutError ?? err); - else error = timeoutError ?? err; + if (promise != null) promise.reject(err); + else error = err; void closeHandler(); } diff --git a/src/collection.ts b/src/collection.ts index dbd91371cce..f3a206b0c7b 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -470,10 +470,14 @@ export class Collection { // Intentionally, we do not inherit options from parent for this operation. 
return await executeOperation( this.client, - new RenameOperation(this as TODO_NODE_3286, newName, { - ...options, - readPreference: ReadPreference.PRIMARY - }) as TODO_NODE_3286 + new RenameOperation( + this as TODO_NODE_3286, + newName, + resolveOptions(undefined, { + ...options, + readPreference: ReadPreference.PRIMARY + }) + ) as TODO_NODE_3286 ); } diff --git a/src/db.ts b/src/db.ts index 48501bc497e..bd0b5450b8c 100644 --- a/src/db.ts +++ b/src/db.ts @@ -275,12 +275,16 @@ export class Db { // Intentionally, we do not inherit options from parent for this operation. return await executeOperation( this.client, - new RunCommandOperation(this, command, { - ...resolveBSONOptions(options), - timeoutMS: options?.timeoutMS ?? this.timeoutMS, - session: options?.session, - readPreference: options?.readPreference - }) + new RunCommandOperation( + this, + command, + resolveOptions(undefined, { + ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS ?? this.timeoutMS, + session: options?.session, + readPreference: options?.readPreference + }) + ) ); } @@ -385,7 +389,11 @@ export class Db { new RenameOperation( this.collection(fromCollection) as TODO_NODE_3286, toCollection, - { ...options, new_collection: true, readPreference: ReadPreference.primary } + resolveOptions(undefined, { + ...options, + new_collection: true, + readPreference: ReadPreference.primary + }) ) as TODO_NODE_3286 ); } diff --git a/src/error.ts b/src/error.ts index f0441426feb..2b973cccc38 100644 --- a/src/error.ts +++ b/src/error.ts @@ -128,6 +128,9 @@ function isAggregateError(e: unknown): e is Error & { errors: Error[] } { * mongodb-client-encryption has a dependency on this error, it uses the constructor with a string argument */ export class MongoError extends Error { + get [Symbol.toStringTag]() { + return this.name; + } /** @internal */ [kErrorLabels]: Set; /** diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index cdddc1211a8..2523058ecfd 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -58,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T, timeoutContext?: TimeoutContext): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -81,11 +81,6 @@ export async function executeOperation< } else if (session.client !== client) { throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient'); } - if (session.explicit && session?.timeoutMS != null && operation.options.timeoutMS != null) { - throw new MongoInvalidArgumentError( - 'Do not specify timeoutMS on operation if already specified on an explicit session' - ); - } const readPreference = operation.readPreference ?? 
ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -107,6 +102,7 @@ export async function executeOperation< } timeoutContext ??= TimeoutContext.create({ + session, serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, timeoutMS: operation.options.timeoutMS diff --git a/src/sessions.ts b/src/sessions.ts index bad966ed71c..bbd1785275f 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -29,6 +29,7 @@ import { ReadConcernLevel } from './read_concern'; import { ReadPreference } from './read_preference'; import { type AsyncDisposable, configureResourceManagement } from './resource_management'; import { _advanceClusterTime, type ClusterTime, TopologyType } from './sdam/common'; +import { TimeoutContext } from './timeout'; import { isTransactionCommand, Transaction, @@ -58,8 +59,11 @@ export interface ClientSessionOptions { snapshot?: boolean; /** The default TransactionOptions to use for transactions started on this session. */ defaultTransactionOptions?: TransactionOptions; - /** @internal - * The value of timeoutMS used for CSOT. Used to override client timeoutMS */ + /** + * @public + * An overriding timeoutMS value to use for a client-side timeout. + * If not provided the session uses the timeoutMS specified on the MongoClient. + */ defaultTimeoutMS?: number; /** @internal */ @@ -98,6 +102,9 @@ export interface EndSessionOptions { error?: AnyError; force?: boolean; forceClear?: boolean; + + /** @internal */ + timeoutMS?: number; } /** @@ -115,7 +122,7 @@ export class ClientSession /** @internal */ sessionPool: ServerSessionPool; hasEnded: boolean; - clientOptions?: MongoOptions; + clientOptions: MongoOptions; supports: { causalConsistency: boolean }; clusterTime?: ClusterTime; operationTime?: Timestamp; @@ -137,6 +144,9 @@ export class ClientSession /** @internal */ timeoutMS?: number; + /** @internal */ + public timeoutContext: TimeoutContext | null = null; + /** * Create a client session. * @internal @@ -149,7 +159,7 @@ export class ClientSession client: MongoClient, sessionPool: ServerSessionPool, options: ClientSessionOptions, - clientOptions?: MongoOptions + clientOptions: MongoOptions ) { super(); @@ -269,8 +279,13 @@ export class ClientSession async endSession(options?: EndSessionOptions): Promise { try { if (this.inTransaction()) { - await this.abortTransaction(); + await this.abortTransaction({ ...options, throwTimeout: true }); } + } catch (error) { + // spec indicates that we should ignore all errors for `endSessions` + if (error.name === 'MongoOperationTimeoutError') throw error; + squashError(error); + } finally { if (!this.hasEnded) { const serverSession = this[kServerSession]; if (serverSession != null) { @@ -286,10 +301,6 @@ export class ClientSession this.hasEnded = true; this.emit('ended', this); } - } catch (error) { - // spec indicates that we should ignore all errors for `endSessions` - squashError(error); - } finally { maybeClearPinnedConnection(this, { force: true, ...options }); } } @@ -441,8 +452,10 @@ export class ClientSession /** * Commits the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. 
*/ - async commitTransaction(): Promise { + async commitTransaction(options?: { timeoutMS?: number }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -492,8 +505,25 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + this.timeoutContext ?? + (typeof timeoutMS === 'number' + ? TimeoutContext.create({ + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS, + timeoutMS + }) + : null); + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (firstCommitError) { if (firstCommitError instanceof MongoError && isRetryableWriteError(firstCommitError)) { @@ -503,7 +533,7 @@ export class ClientSession this.unpin({ force: true }); try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; } catch (retryCommitError) { // If the retry failed, we process that error instead of the original @@ -535,8 +565,13 @@ export class ClientSession /** * Aborts the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. */ - async abortTransaction(): Promise { + async abortTransaction(options?: { timeoutMS?: number }): Promise; + /** @internal */ + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise; + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -581,18 +616,45 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : this.timeoutContext?.csotEnabled() + ? this.timeoutContext.timeoutMS // refresh timeoutMS for abort operation + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + timeoutMS != null + ? 
TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); this.unpin(); return; } catch (firstAbortError) { this.unpin(); + if (firstAbortError.name === 'MongoRuntimeError') throw firstAbortError; + if (options?.throwTimeout && firstAbortError.name === 'MongoOperationTimeoutError') { + throw firstAbortError; + } + if (firstAbortError instanceof MongoError && isRetryableWriteError(firstAbortError)) { try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; - } catch { + } catch (secondAbortError) { + if (secondAbortError.name === 'MongoRuntimeError') throw secondAbortError; + if (options?.throwTimeout && secondAbortError.name === 'MongoOperationTimeoutError') { + throw secondAbortError; + } // we do not retry the retry } } @@ -647,96 +709,119 @@ export class ClientSession */ async withTransaction( fn: WithTransactionCallback, - options?: TransactionOptions + options?: TransactionOptions & { + /** + * Configures a timeoutMS expiry for the entire withTransactionCallback. + * + * @remarks + * - The remaining timeout will not be applied to callback operations that do not use the ClientSession. + * - Overriding timeoutMS for operations executed using the explicit session inside the provided callback will result in a client-side error. + */ + timeoutMS?: number; + } ): Promise { const MAX_TIMEOUT = 120000; - const startTime = now(); - - let committed = false; - let result: any; - while (!committed) { - this.startTransaction(options); // may throw on error + const timeoutMS = options?.timeoutMS ?? this.timeoutMS ?? null; + this.timeoutContext = + timeoutMS != null + ? TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; - try { - const promise = fn(this); - if (!isPromiseLike(promise)) { - throw new MongoInvalidArgumentError( - 'Function provided to `withTransaction` must return a Promise' - ); - } + const startTime = this.timeoutContext?.csotEnabled() ? 
this.timeoutContext.start : now(); - result = await promise; + let committed = false; + let result: any; - if ( - this.transaction.state === TxnState.NO_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_COMMITTED || - this.transaction.state === TxnState.TRANSACTION_ABORTED - ) { - // Assume callback intentionally ended the transaction - return result; - } - } catch (fnError) { - if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { - await this.abortTransaction(); - throw fnError; - } + try { + while (!committed) { + this.startTransaction(options); // may throw on error - if ( - this.transaction.state === TxnState.STARTING_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS - ) { - await this.abortTransaction(); - } + try { + const promise = fn(this); + if (!isPromiseLike(promise)) { + throw new MongoInvalidArgumentError( + 'Function provided to `withTransaction` must return a Promise' + ); + } - if ( - fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT - ) { - continue; - } + result = await promise; - throw fnError; - } + if ( + this.transaction.state === TxnState.NO_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_COMMITTED || + this.transaction.state === TxnState.TRANSACTION_ABORTED + ) { + // Assume callback intentionally ended the transaction + return result; + } + } catch (fnError) { + if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { + await this.abortTransaction(); + throw fnError; + } - while (!committed) { - try { - /* - * We will rely on ClientSession.commitTransaction() to - * apply a majority write concern if commitTransaction is - * being retried (see: DRIVERS-601) - */ - await this.commitTransaction(); - committed = true; - } catch (commitError) { - /* - * Note: a maxTimeMS error will have the MaxTimeMSExpired - * code (50) and can be reported as a top-level error or - * inside writeConcernError, ex. - * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } - * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } - */ if ( - !isMaxTimeMSExpiredError(commitError) && - commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && - now() - startTime < MAX_TIMEOUT + this.transaction.state === TxnState.STARTING_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS ) { - continue; + await this.abortTransaction(); } if ( - commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT + fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) ) { - break; + continue; } - throw commitError; + throw fnError; + } + + while (!committed) { + try { + /* + * We will rely on ClientSession.commitTransaction() to + * apply a majority write concern if commitTransaction is + * being retried (see: DRIVERS-601) + */ + await this.commitTransaction(); + committed = true; + } catch (commitError) { + /* + * Note: a maxTimeMS error will have the MaxTimeMSExpired + * code (50) and can be reported as a top-level error or + * inside writeConcernError, ex. 
+ * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } + * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } + */ + if ( + !isMaxTimeMSExpiredError(commitError) && + commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + continue; + } + + if ( + commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + break; + } + + throw commitError; + } } } + return result; + } finally { + this.timeoutContext = null; } - - return result; } } diff --git a/src/timeout.ts b/src/timeout.ts index 297a484b4ec..f057bdb90b4 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,16 +1,19 @@ import { clearTimeout, setTimeout } from 'timers'; import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; +import { type ClientSession } from './sessions'; import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { + duration: number; override get name(): 'TimeoutError' { return 'TimeoutError'; } - constructor(message: string, options?: { cause?: Error }) { + constructor(message: string, options: { cause?: Error; duration: number }) { super(message, options); + this.duration = options.duration; } static is(error: unknown): error is TimeoutError { @@ -52,12 +55,19 @@ export class Timeout extends Promise { } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = true) { - let reject!: Reject; + private constructor( + executor: Executor = () => null, + options?: { duration: number; unref?: true; rejection?: Error } + ) { + const duration = options?.duration ?? 
0; + const unref = !!options?.unref; + const rejection = options?.rejection; + if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } + let reject!: Reject; super((_, promiseReject) => { reject = promiseReject; @@ -67,16 +77,20 @@ export class Timeout extends Promise { this.duration = duration; this.start = Math.trunc(performance.now()); - if (this.duration > 0) { + if (rejection == null && this.duration > 0) { this.id = setTimeout(() => { this.ended = Math.trunc(performance.now()); this.timedOut = true; - reject(new TimeoutError(`Expired after ${duration}ms`)); + reject(new TimeoutError(`Expired after ${duration}ms`, { duration })); }, this.duration); if (typeof this.id.unref === 'function' && unref) { // Ensure we do not keep the Node.js event loop running this.id.unref(); } + } else if (rejection != null) { + this.ended = Math.trunc(performance.now()); + this.timedOut = true; + reject(rejection); } } @@ -90,11 +104,11 @@ export class Timeout extends Promise { } throwIfExpired(): void { - if (this.timedOut) throw new TimeoutError('Timed out'); + if (this.timedOut) throw new TimeoutError('Timed out', { duration: this.duration }); } - public static expires(durationMS: number, unref?: boolean): Timeout { - return new Timeout(undefined, durationMS, unref); + public static expires(duration: number, unref?: true): Timeout { + return new Timeout(undefined, { duration, unref }); } static is(timeout: unknown): timeout is Timeout { @@ -107,10 +121,16 @@ export class Timeout extends Promise { typeof timeout.then === 'function' ); } + + static override reject(rejection?: Error): Timeout { + return new Timeout(undefined, { duration: 0, unref: true, rejection }); + } } /** @internal */ -export type TimeoutContextOptions = LegacyTimeoutContextOptions | CSOTTimeoutContextOptions; +export type TimeoutContextOptions = (LegacyTimeoutContextOptions | CSOTTimeoutContextOptions) & { + session?: ClientSession; +}; /** @internal */ export type LegacyTimeoutContextOptions = { @@ -151,6 +171,7 @@ function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions /** @internal */ export abstract class TimeoutContext { static create(options: TimeoutContextOptions): TimeoutContext { + if (options.session?.timeoutContext != null) return options.session?.timeoutContext; if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); else throw new MongoRuntimeError('Unrecognized options'); @@ -183,7 +204,7 @@ export class CSOTTimeoutContext extends TimeoutContext { private _serverSelectionTimeout?: Timeout | null; private _connectionCheckoutTimeout?: Timeout | null; public minRoundTripTime = 0; - private start: number; + public start: number; constructor(options: CSOTTimeoutContextOptions) { super(); @@ -217,8 +238,8 @@ export class CSOTTimeoutContext extends TimeoutContext { if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { const { remainingTimeMS, serverSelectionTimeoutMS } = this; if (remainingTimeMS <= 0) - throw new MongoOperationTimeoutError( - `Timed out in server selection after ${this.timeoutMS}ms` + return Timeout.reject( + new MongoOperationTimeoutError(`Timed out in server selection after ${this.timeoutMS}ms`) ); const usingServerSelectionTimeoutMS = serverSelectionTimeoutMS !== 0 && @@ -258,14 +279,14 @@ export class CSOTTimeoutContext extends TimeoutContext { const { remainingTimeMS 
} = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket write'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket write')); } get timeoutForSocketRead(): Timeout | null { const { remainingTimeMS } = this; if (!Number.isFinite(remainingTimeMS)) return null; if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); - throw new MongoOperationTimeoutError('Timed out before socket read'); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } } diff --git a/src/transactions.ts b/src/transactions.ts index 53dcb842084..db251c82c16 100644 --- a/src/transactions.ts +++ b/src/transactions.ts @@ -60,7 +60,7 @@ const COMMITTED_STATES: Set = new Set([ * Configuration options for a transaction. * @public */ -export interface TransactionOptions extends CommandOperationOptions { +export interface TransactionOptions extends Omit { // TODO(NODE-3344): These options use the proper class forms of these settings, it should accept the basic enum values too /** A default read concern for commands in this transaction */ readConcern?: ReadConcernLike; @@ -68,7 +68,10 @@ export interface TransactionOptions extends CommandOperationOptions { writeConcern?: WriteConcern; /** A default read preference for commands in this transaction */ readPreference?: ReadPreferenceLike; - /** Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds */ + /** + * Specifies the maximum amount of time to allow a commit action on a transaction to run in milliseconds + * @deprecated This option is deprecated in favor of `timeoutMS` or `defaultTimeoutMS`. + */ maxCommitTimeMS?: number; } diff --git a/src/utils.ts b/src/utils.ts index ebc0784cb1f..04174813c9c 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -517,6 +517,10 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. + * + * @param parent - An optional owning class of the operation being run. ex. Db/Collection/MongoClient. + * @param options - The options passed to the operation method. + * * @internal */ export function resolveOptions( @@ -544,9 +548,14 @@ export function resolveOptions( result.readPreference = readPreference; } - const timeoutMS = options?.timeoutMS; + const isConvenientTransaction = session?.explicit && session?.timeoutContext != null; + if (isConvenientTransaction && options?.timeoutMS != null) { + throw new MongoInvalidArgumentError( + 'An operation cannot be given a timeoutMS setting when inside a withTransaction call that has a timeoutMS setting' + ); + } - result.timeoutMS = timeoutMS ?? parent?.timeoutMS; + result.timeoutMS = options?.timeoutMS ?? 
parent?.timeoutMS; return result; } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 729bed42199..406aa53ed6a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,6 +1,7 @@ /* Specification prose tests */ import { expect } from 'chai'; +import * as semver from 'semver'; import * as sinon from 'sinon'; import { @@ -9,6 +10,7 @@ import { MongoServerSelectionError, now } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -595,7 +597,10 @@ describe('CSOT spec prose tests', function () { 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context.skip('9. endSession', () => { + describe('9. endSession', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4', topology: ['replicaset', 'sharded'] } + }; /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -625,12 +630,92 @@ describe('CSOT spec prose tests', function () { * 1. Using `session`, execute `session.end_session` * - Expect this to fail with a timeout error after no more than 15ms. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + const internalClient = this.configuration.newClient(); + // End in-progress transactions otherwise "drop" will hang + await internalClient.db('admin').command({ killAllSessions: [] }); + await internalClient + .db('endSession_db') + .collection('endSession_coll') + .drop() + .catch(() => null); + await internalClient.db('endSession_db').createCollection('endSession_coll'); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + const internalClient = this.configuration.newClient(); + await internalClient.db('admin').command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client?.close(); + }); + + describe('when timeoutMS is provided to the client', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient({ timeoutMS: 150, monitorCommands: true }); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when defaultTimeoutMS is provided to startSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = 
client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession({ defaultTimeoutMS: 150 }); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when timeoutMS is provided to endSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession({ timeoutMS: 150 }).catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); }); - context.skip('10. Convenient Transactions', () => { + describe('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=4.4' } + }; - context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { + describe('when an operation fails inside withTransaction callback', () => { /** * 1. Using `internalClient`, drop the `db.coll` collection. * 1. Using `internalClient`, set the following fail point: @@ -641,7 +726,7 @@ describe('CSOT spec prose tests', function () { * data: { * failCommands: ["insert", "abortTransaction"], * blockConnection: true, - * blockTimeMS: 15 + * blockTimeMS: 200 * } * } * ``` @@ -658,6 +743,80 @@ describe('CSOT spec prose tests', function () { * 1. `command_started` and `command_failed` events for an `insert` command. * 1. `command_started` and `command_failed` events for an `abortTransaction` command. 
*/ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it('timeoutMS is refreshed for abortTransaction', metadata, async function () { + if ( + this.configuration.topologyType === 'ReplicaSetWithPrimary' && + semver.satisfies(this.configuration.version, '<=4.4') + ) { + this.skipReason = '4.4 replicaset fail point does not blockConnection for requested time'; + this.skip(); + } + + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 150, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + const withTransactionError = await session + .withTransaction(async session => { + await coll.insertOne({ x: 1 }, { session }); + }) + .catch(error => error); + + try { + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal(['insert', 'abortTransaction']); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + }); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index e4c9eb3027c..a178cecc5d2 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -1,4 +1,5 @@ import { join } from 'path'; +import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; @@ -8,7 +9,10 @@ const enabled = [ 'override-database-timeoutMS', 'override-operation-timeoutMS', 'retryability-legacy-timeouts', - 'retryability-timeoutMS' + 'retryability-timeoutMS', + 'sessions-override-operation-timeoutMS', + 'sessions-override-timeoutMS', + 'sessions-inherit-timeoutMS' ]; const cursorOperations = [ @@ -43,5 +47,15 @@ describe('CSOT spec tests', function () { 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } - runUnifiedSuite(specs); + runUnifiedSuite(specs, (test, configuration) => { + const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + sessionCSOTTests.includes(test.description) && + configuration.topologyType === 
'ReplicaSetWithPrimary' && + semver.satisfies(configuration.version, '<=4.4') + ) { + return '4.4 replicaset fail point does not blockConnection for requested time'; + } + return false; + }); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index d7d4a4ede5a..cc767c1d80a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -12,6 +12,7 @@ import { type FindCursor, LEGACY_HELLO_COMMAND, type MongoClient, + MongoInvalidArgumentError, MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; @@ -320,4 +321,153 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { }); }); }); + + describe('when using an explicit session', () => { + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset'], mongodb: '>=4.4' } + }; + + describe('created for a withTransaction callback', () => { + describe('passing a timeoutMS and a session with a timeoutContext', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('throws a validation error from the operation', metadata, async () => { + // Drivers MUST raise a validation error if an explicit session with a timeout is used and + // the timeoutMS option is set at the operation level for operations executed as part of a withTransaction callback. + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll + .insertOne({ x: 1 }, { session, timeoutMS: 1234 }) + .catch(error => error); + throw insertError; + }) + .catch(error => error); + + expect(insertError).to.be.instanceOf(MongoInvalidArgumentError); + expect(withTransactionError).to.be.instanceOf(MongoInvalidArgumentError); + }); + }); + }); + + describe('created manually', () => { + describe('passing a timeoutMS and a session with an inherited timeoutMS', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('does not throw a validation error', metadata, async () => { + const coll = client.db('db').collection('coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session, timeoutMS: 1234 }); + await session.abortTransaction(); // this uses the inherited timeoutMS, not the insert + }); + }); + }); + }); + + describe('Convenient Transactions', () => { + /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. 
*/ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=5.0' } + }; + + describe('when an operation fails inside withTransaction callback', () => { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 600 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it( + 'timeoutMS is refreshed for abortTransaction and the timeout error is thrown from the operation', + metadata, + async function () { + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 500, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll.insertOne({ x: 1 }, { session }).catch(error => error); + throw insertError; + }) + .catch(error => error); + + try { + expect(insertError).to.be.instanceOf(MongoOperationTimeoutError); + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal([ + 'insert', + 'abortTransaction' + ]); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + } + ); + }); + }); }); diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json index abbc3217327..13ea91c7948 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } 
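For reference, the "sessions-inherit-timeoutMS" spec file above asserts that a client-level `timeoutMS` is inherited by explicit sessions and the transaction commands run on them. A minimal driver-level sketch of the commitTransaction case follows; it is illustrative only, assumes the `failCommand` fail point shown above is already blocking `commitTransaction` for ~600ms, uses placeholder `db`/`coll` names, and relies on the `timeoutMS` client option and `MongoOperationTimeoutError` introduced elsewhere in this patch series.

```ts
import { MongoClient, MongoOperationTimeoutError } from 'mongodb';

// Sketch only: a 'failCommand' fail point is assumed to already be blocking
// commitTransaction for ~600ms, longer than the 500ms client-level budget.
async function commitTimesOutUnderInheritedTimeoutMS(uri: string): Promise<boolean> {
  const client = new MongoClient(uri, { timeoutMS: 500 }); // timeout configured once, on the client
  const session = client.startSession(); // no session- or operation-level override
  try {
    session.startTransaction();
    await client.db('db').collection('coll').insertOne({ x: 1 }, { session });
    await session.commitTransaction(); // blocked server-side past the inherited 500ms budget
    return false; // unexpected: the commit should not succeed while the fail point is active
  } catch (error) {
    return error instanceof MongoOperationTimeoutError; // expected result: true
  } finally {
    await session.endSession().catch(() => null);
    await client.close();
  }
}
```

The 500ms/600ms values mirror the bumped `timeoutMS`/`blockTimeMS` pairs in the JSON diff above and the YAML diff below.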
diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml index 184ef7eb9e7..c79384e5f0b 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -52,7 +52,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json index 0254b184a14..441c698328c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml index 8a80a65720a..bee91dc4cb8 100644 --- 
a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml @@ -50,7 +50,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -61,7 +61,7 @@ tests: - name: commitTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectError: isTimeoutError: true expectEvents: @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -106,7 +106,7 @@ tests: - name: abortTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectEvents: - client: *client events: @@ -138,11 +138,11 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 callback: - name: insertOne object: *collection @@ -156,9 +156,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -169,3 +166,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json index c46ae4dd506..d90152e909c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml index 61aaab4d97e..73aaf9ff2a7 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml @@ -29,7 +29,7 @@ createEntities: id: &session session client: *client sessionOptions: - defaultTimeoutMS: 50 + defaultTimeoutMS: 500 initialData: - collectionName: *collectionName @@ -52,7 +52,7 @@ 
tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 9f4e20a828e..7f90e275dc8 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -619,6 +619,10 @@ export class EntitiesMap extends Map { const options = Object.create(null); + if (entity.session.sessionOptions?.defaultTimeoutMS != null) { + options.defaultTimeoutMS = entity.session.sessionOptions?.defaultTimeoutMS; + } + if (entity.session.sessionOptions?.causalConsistency) { options.causalConsistency = entity.session.sessionOptions?.causalConsistency; } diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index ebcd2cfdd85..662746b4591 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -513,6 +513,13 @@ function compareCommandFailedEvents( } } +function expectInstanceOf any>( + instance: any, + ctor: T +): asserts instance is InstanceType { + expect(instance).to.be.instanceOf(ctor); +} + function compareEvents( actual: CommandEvent[] | CmapEvent[] | SdamEvent[], expected: (ExpectedCommandEvent & ExpectedCmapEvent & ExpectedSdamEvent)[], @@ -527,9 +534,7 @@ function compareEvents( if (expectedEvent.commandStartedEvent) { const path = `${rootPrefix}.commandStartedEvent`; - if (!(actualEvent instanceof CommandStartedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandStartedEvent`); - } + expectInstanceOf(actualEvent, CommandStartedEvent); compareCommandStartedEvents(actualEvent, expectedEvent.commandStartedEvent, entities, path); if (expectedEvent.commandStartedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); @@ -538,9 +543,7 @@ function compareEvents( } } else if (expectedEvent.commandSucceededEvent) { const path = `${rootPrefix}.commandSucceededEvent`; - if (!(actualEvent instanceof CommandSucceededEvent)) { - expect.fail(`expected ${path} to be instanceof CommandSucceededEvent`); - } + expectInstanceOf(actualEvent, CommandSucceededEvent); compareCommandSucceededEvents( actualEvent, expectedEvent.commandSucceededEvent, @@ -554,9 +557,7 @@ function compareEvents( } } else if (expectedEvent.commandFailedEvent) { const path = `${rootPrefix}.commandFailedEvent`; - if 
(!(actualEvent instanceof CommandFailedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandFailedEvent`); - } + expectInstanceOf(actualEvent, CommandFailedEvent); compareCommandFailedEvents(actualEvent, expectedEvent.commandFailedEvent, entities, path); if (expectedEvent.commandFailedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 7a98c7ac978..5b5b7040698 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -19,6 +19,7 @@ import { ServerType, type TopologyDescription, type TopologyType, + type TransactionOptions, WriteConcern } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -49,11 +50,6 @@ operations.set('createEntities', async ({ entities, operation, testConfig }) => await EntitiesMap.createEntities(testConfig, null, operation.arguments.entities!, entities); }); -operations.set('abortTransaction', async ({ entities, operation }) => { - const session = entities.getEntity('session', operation.object); - return session.abortTransaction(); -}); - operations.set('aggregate', async ({ entities, operation }) => { const dbOrCollection = entities.get(operation.object) as Db | Collection; if (!(dbOrCollection instanceof Db || dbOrCollection instanceof Collection)) { @@ -241,7 +237,12 @@ operations.set('close', async ({ entities, operation }) => { operations.set('commitTransaction', async ({ entities, operation }) => { const session = entities.getEntity('session', operation.object); - return session.commitTransaction(); + return await session.commitTransaction({ timeoutMS: operation.arguments?.timeoutMS }); +}); + +operations.set('abortTransaction', async ({ entities, operation }) => { + const session = entities.getEntity('session', operation.object); + return await session.abortTransaction({ timeoutMS: operation.arguments?.timeoutMS }); }); operations.set('createChangeStream', async ({ entities, operation }) => { @@ -371,7 +372,7 @@ operations.set('insertOne', async ({ entities, operation }) => { // Looping exposes the fact that we can generate _ids for inserted // documents and we don't want the original operation to get modified // and use the same _id for each insert. 
- return collection.insertOne({ ...document }, opts); + return await collection.insertOne({ ...document }, opts); }); operations.set('insertMany', async ({ entities, operation }) => { @@ -718,13 +719,17 @@ operations.set('waitForThread', async ({ entities, operation }) => { operations.set('withTransaction', async ({ entities, operation, client, testConfig }) => { const session = entities.getEntity('session', operation.object); - const options = { + const options: TransactionOptions = { readConcern: ReadConcern.fromOptions(operation.arguments), writeConcern: WriteConcern.fromOptions(operation.arguments), readPreference: ReadPreference.fromOptions(operation.arguments), - maxCommitTimeMS: operation.arguments!.maxCommitTimeMS + maxCommitTimeMS: operation.arguments?.maxCommitTimeMS }; + if (typeof operation.arguments?.timeoutMS === 'number') { + options.timeoutMS = operation.arguments.timeoutMS; + } + await session.withTransaction(async () => { for (const callbackOperation of operation.arguments!.callback) { await executeOperationAndCheck(callbackOperation, entities, client, testConfig, true); @@ -945,7 +950,7 @@ export async function executeOperationAndCheck( rethrow = false ): Promise { const opFunc = operations.get(operation.name); - expect(opFunc, `Unknown operation: ${operation.name}`).to.exist; + if (opFunc == null) expect.fail(`Unknown operation: ${operation.name}`); if (operation.arguments && operation.arguments.session) { // The session could need to be either pulled from the entity map or in the case where @@ -959,7 +964,7 @@ export async function executeOperationAndCheck( let result; try { - result = await opFunc!({ entities, operation, client, testConfig }); + result = await opFunc({ entities, operation, client, testConfig }); } catch (error) { if (operation.expectError) { expectErrorCheck(error, operation.expectError, entities); From 546366f2def8cf0026b7d5adf56325e07697b66d Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 11:35:27 -0400 Subject: [PATCH 099/136] feat(NODE-6304): add CSOT support for non-tailable cursors (#4195) --- src/cmap/connection.ts | 4 +- src/cmap/wire_protocol/on_data.ts | 1 - src/collection.ts | 6 +- src/cursor/abstract_cursor.ts | 146 +++- src/cursor/aggregation_cursor.ts | 20 +- src/cursor/change_stream_cursor.ts | 6 +- src/cursor/find_cursor.ts | 2 +- src/cursor/list_collections_cursor.ts | 2 +- src/cursor/list_indexes_cursor.ts | 2 +- src/cursor/run_command_cursor.ts | 14 +- src/index.ts | 2 +- src/operations/aggregate.ts | 3 + src/operations/execute_operation.ts | 3 +- src/operations/find.ts | 3 + src/operations/indexes.ts | 9 +- src/operations/list_collections.ts | 3 + src/operations/operation.ts | 3 + src/operations/run_command.ts | 2 + src/sessions.ts | 12 +- src/timeout.ts | 27 +- ...ient_side_operations_timeout.prose.test.ts | 84 ++- ...lient_side_operations_timeout.spec.test.ts | 83 ++- .../node_csot.test.ts | 335 ++++++++- .../command-execution.json | 153 ++++ .../client-side-operations-timeout/README.md | 661 ++++++++++++++++++ .../change-streams.json | 20 +- .../change-streams.yml | 30 +- .../close-cursors.json | 12 +- .../close-cursors.yml | 12 +- .../command-execution.json | 2 +- .../command-execution.yml | 5 +- .../convenient-transactions.json | 22 +- .../convenient-transactions.yml | 15 +- .../deprecated-options.json | 2 +- .../deprecated-options.yml | 2 +- .../gridfs-advanced.yml | 2 +- .../non-tailable-cursors.json | 20 +- .../non-tailable-cursors.yml | 32 +- .../retryability-timeoutMS.json | 250 +++++++ 
.../retryability-timeoutMS.yml | 100 +++ .../tailable-awaitData.json | 14 +- .../tailable-awaitData.yml | 18 +- .../tailable-non-awaitData.json | 10 +- .../tailable-non-awaitData.yml | 12 +- test/tools/unified-spec-runner/operations.ts | 7 +- test/unit/cursor/aggregation_cursor.test.ts | 67 +- 46 files changed, 2006 insertions(+), 234 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json create mode 100644 test/spec/client-side-operations-timeout/README.md diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 7ad367e6733..507615e9f03 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -86,6 +86,7 @@ export interface CommandOptions extends BSONSerializeOptions { documentsReturnedIn?: string; noResponse?: boolean; omitReadPreference?: boolean; + omitMaxTimeMS?: boolean; // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint // from executeOperation that the txnNum should be applied to this command. @@ -421,7 +422,7 @@ export class Connection extends TypedEventEmitter { ...options }; - if (options.timeoutContext?.csotEnabled()) { + if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { const { maxTimeMS } = options.timeoutContext; if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } @@ -621,7 +622,6 @@ export class Connection extends TypedEventEmitter { for await (const document of this.sendCommand(ns, command, options, responseType)) { if (options.timeoutContext?.csotEnabled()) { if (MongoDBResponse.is(document)) { - // TODO(NODE-5684): test coverage to be added once cursors are enabling CSOT if (document.isMaxTimeExpiredError) { throw new MongoOperationTimeoutError('Server reported a timeout error', { cause: new MongoServerError(document.toObject()) diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 23fd88e2828..64c636f41f1 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -93,7 +93,6 @@ export function onData( const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; timeoutForSocketRead?.throwIfExpired(); - // eslint-disable-next-line github/no-then timeoutForSocketRead?.then(undefined, errorHandler); return iterator; diff --git a/src/collection.ts b/src/collection.ts index f3a206b0c7b..a73a5276f5f 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -11,7 +11,7 @@ import { type ListSearchIndexesOptions } from './cursor/list_search_indexes_cursor'; import type { Db } from './db'; -import { MongoInvalidArgumentError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError } from './error'; import type { MongoClient, PkFactory } from './mongo_client'; import type { Filter, @@ -678,7 +678,9 @@ export class Collection { new DropIndexOperation(this as TODO_NODE_3286, '*', resolveOptions(this, options)) ); return true; - } catch { + } catch (error) { + if (error instanceof MongoOperationTimeoutError) throw error; // TODO: Check the spec for index management behaviour/file a drivers ticket for this + // Seems like we should throw all errors return false; } } diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 51206b51a27..d0f386923ad 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,6 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from 
'../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; +import { TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -60,6 +61,17 @@ export interface CursorStreamOptions { /** @public */ export type CursorFlag = (typeof CURSOR_FLAGS)[number]; +/** @public*/ +export const CursorTimeoutMode = Object.freeze({ + ITERATION: 'iteration', + LIFETIME: 'cursorLifetime' +} as const); + +/** @public + * TODO(NODE-5688): Document and release + * */ +export type CursorTimeoutMode = (typeof CursorTimeoutMode)[keyof typeof CursorTimeoutMode]; + /** @public */ export interface AbstractCursorOptions extends BSONSerializeOptions { session?: ClientSession; @@ -105,6 +117,8 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { noCursorTimeout?: boolean; /** @internal TODO(NODE-5688): make this public */ timeoutMS?: number; + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; } /** @internal */ @@ -117,6 +131,8 @@ export type InternalAbstractCursorOptions = Omit { - await this.cleanup(); + async close(options?: { timeoutMS?: number }): Promise { + await this.cleanup(options?.timeoutMS); } /** @@ -658,6 +727,8 @@ export abstract class AbstractCursor< this.cursorId = null; this.documents?.clear(); + this.timeoutContext?.clear(); + this.timeoutContext = undefined; this.isClosed = false; this.isKilled = false; this.initialized = false; @@ -707,7 +778,7 @@ export abstract class AbstractCursor< } ); - return await executeOperation(this.cursorClient, getMoreOperation); + return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); } /** @@ -718,6 +789,12 @@ export abstract class AbstractCursor< * a significant refactor. 
*/ private async cursorInit(): Promise { + if (this.cursorOptions.timeoutMS != null) { + this.timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }); + } try { const state = await this._initialize(this.cursorSession); const response = state.response; @@ -729,7 +806,7 @@ export abstract class AbstractCursor< } catch (error) { // the cursor is now initialized, even if an error occurred this.initialized = true; - await this.cleanup(error); + await this.cleanup(undefined, error); throw error; } @@ -763,6 +840,7 @@ export abstract class AbstractCursor< // otherwise need to call getMore const batchSize = this.cursorOptions.batchSize || 1000; + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; try { const response = await this.getMore(batchSize); @@ -770,7 +848,7 @@ export abstract class AbstractCursor< this.documents = response; } catch (error) { try { - await this.cleanup(error); + await this.cleanup(undefined, error); } catch (error) { // `cleanupCursor` should never throw, squash and throw the original error squashError(error); @@ -791,7 +869,7 @@ export abstract class AbstractCursor< } /** @internal */ - private async cleanup(error?: Error) { + private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; try { @@ -806,11 +884,23 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; + let timeoutContext: TimeoutContext | undefined; + if (timeoutMS != null) { + this.timeoutContext?.clear(); + timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }); + } else { + this.timeoutContext?.refresh(); + timeoutContext = this.timeoutContext; + } await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session - }) + }), + timeoutContext ); } } catch (error) { diff --git a/src/cursor/aggregation_cursor.ts b/src/cursor/aggregation_cursor.ts index 9762c8a03bf..056f28454ce 100644 --- a/src/cursor/aggregation_cursor.ts +++ b/src/cursor/aggregation_cursor.ts @@ -1,4 +1,5 @@ import type { Document } from '../bson'; +import { MongoAPIError } from '../error'; import type { ExplainCommandOptions, ExplainVerbosityLike } from '../explain'; import type { MongoClient } from '../mongo_client'; import { AggregateOperation, type AggregateOptions } from '../operations/aggregate'; @@ -9,6 +10,7 @@ import { mergeOptions, type MongoDBNamespace } from '../utils'; import { AbstractCursor, type AbstractCursorOptions, + CursorTimeoutMode, type InitialCursorResponse } from './abstract_cursor'; @@ -38,6 +40,15 @@ export class AggregationCursor extends AbstractCursor { this.pipeline = pipeline; this.aggregateOptions = options; + + const lastStage: Document | undefined = this.pipeline[this.pipeline.length - 1]; + + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (lastStage?.$merge != null || lastStage?.$out != null) + ) + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); } clone(): AggregationCursor { @@ -60,7 +71,7 @@ export class AggregationCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, aggregateOperation); + const response = await executeOperation(this.client, aggregateOperation, 
this.timeoutContext); return { server: aggregateOperation.server, session, response }; } @@ -95,6 +106,13 @@ export class AggregationCursor extends AbstractCursor { addStage(stage: Document): AggregationCursor; addStage(stage: Document): AggregationCursor { this.throwIfInitialized(); + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (stage.$out != null || stage.$merge != null) + ) { + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); + } this.pipeline.push(stage); return this as unknown as AggregationCursor; } diff --git a/src/cursor/change_stream_cursor.ts b/src/cursor/change_stream_cursor.ts index b42ce3e1302..13f58675552 100644 --- a/src/cursor/change_stream_cursor.ts +++ b/src/cursor/change_stream_cursor.ts @@ -133,7 +133,11 @@ export class ChangeStreamCursor< session }); - const response = await executeOperation(session.client, aggregateOperation); + const response = await executeOperation( + session.client, + aggregateOperation, + this.timeoutContext + ); const server = aggregateOperation.server; this.maxWireVersion = maxWireVersion(server); diff --git a/src/cursor/find_cursor.ts b/src/cursor/find_cursor.ts index 83a12818bd0..96b764dc7ff 100644 --- a/src/cursor/find_cursor.ts +++ b/src/cursor/find_cursor.ts @@ -69,7 +69,7 @@ export class FindCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, findOperation); + const response = await executeOperation(this.client, findOperation, this.timeoutContext); // the response is not a cursor when `explain` is enabled this.numReturned = response.batchSize; diff --git a/src/cursor/list_collections_cursor.ts b/src/cursor/list_collections_cursor.ts index a529709556d..9b69de1b935 100644 --- a/src/cursor/list_collections_cursor.ts +++ b/src/cursor/list_collections_cursor.ts @@ -41,7 +41,7 @@ export class ListCollectionsCursor< session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/list_indexes_cursor.ts b/src/cursor/list_indexes_cursor.ts index 799ddf5bdb5..0f768f3b699 100644 --- a/src/cursor/list_indexes_cursor.ts +++ b/src/cursor/list_indexes_cursor.ts @@ -30,7 +30,7 @@ export class ListIndexesCursor extends AbstractCursor { session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 78b9826b9b1..6b31ce2263a 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -9,12 +9,20 @@ import type { ReadConcernLike } from '../read_concern'; import type { ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; import { ns } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { + AbstractCursor, + type CursorTimeoutMode, + type InitialCursorResponse +} from './abstract_cursor'; /** @public */ export type RunCursorCommandOptions = { readPreference?: ReadPreferenceLike; session?: ClientSession; + /** @internal */ + timeoutMS?: number; + /** @internal */ + timeoutMode?: CursorTimeoutMode; } & BSONSerializeOptions; /** 
@public */ @@ -105,7 +113,7 @@ export class RunCommandCursor extends AbstractCursor { responseType: CursorResponse }); - const response = await executeOperation(this.client, operation); + const response = await executeOperation(this.client, operation, this.timeoutContext); return { server: operation.server, @@ -123,6 +131,6 @@ export class RunCommandCursor extends AbstractCursor { ...this.getMoreOptions }); - return await executeOperation(this.client, getMoreOperation); + return await executeOperation(this.client, getMoreOperation, this.timeoutContext); } } diff --git a/src/index.ts b/src/index.ts index 693fcf03493..e555d97e9ed 100644 --- a/src/index.ts +++ b/src/index.ts @@ -109,7 +109,7 @@ export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypt export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi'; export { AuthMechanism } from './cmap/auth/providers'; export { Compressor } from './cmap/wire_protocol/compression'; -export { CURSOR_FLAGS } from './cursor/abstract_cursor'; +export { CURSOR_FLAGS, type CursorTimeoutMode } from './cursor/abstract_cursor'; export { MongoErrorLabel } from './error'; export { ExplainVerbosity } from './explain'; export { ServerApiVersion } from './mongo_client'; diff --git a/src/operations/aggregate.ts b/src/operations/aggregate.ts index f1721ba41cd..0e9fbb0b846 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -1,5 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { type ExplainOptions } from '../explain'; import type { Server } from '../sdam/server'; @@ -44,6 +45,8 @@ export interface AggregateOptions extends Omit * @deprecated This API is deprecated in favor of `collection.find().explain()`. 
*/ explain?: ExplainOptions['explain']; + /** @internal*/ + timeoutMode?: CursorTimeoutMode; } /** @internal */ diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index c96a5d73453..220d438d834 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -1,7 +1,7 @@ import type { Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; import type { Collection } from '../collection'; -import { type AbstractCursorOptions } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; @@ -360,7 +360,12 @@ export class DropIndexOperation extends CommandOperation { } /** @public */ -export type ListIndexesOptions = AbstractCursorOptions; +export type ListIndexesOptions = AbstractCursorOptions & { + /** @internal TODO(NODE-5688): make this public */ + timeoutMode?: CursorTimeoutMode; + /** @internal */ + omitMaxTimeMS?: boolean; +}; /** @internal */ export class ListIndexesOperation extends CommandOperation { diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index 702db0fe3f2..50df243a3ff 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -1,5 +1,6 @@ import type { Binary, Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; @@ -16,6 +17,8 @@ export interface ListCollectionsOptions extends Omit { public readonly start: number; public ended: number | null = null; public duration: number; - public timedOut = false; + private timedOut = false; public cleared = false; get remainingTime(): number { @@ -100,6 +100,7 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.timedOut = false; this.cleared = true; } @@ -190,6 +191,10 @@ export abstract class TimeoutContext { abstract get timeoutForSocketRead(): Timeout | null; abstract csotEnabled(): this is CSOTTimeoutContext; + + abstract refresh(): void; + + abstract clear(): void; } /** @internal */ @@ -288,6 +293,18 @@ export class CSOTTimeoutContext extends TimeoutContext { if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); } + + refresh(): void { + this.start = Math.trunc(performance.now()); + this.minRoundTripTime = 0; + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } + + clear(): void { + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } } /** @internal */ @@ -326,4 +343,12 @@ export class LegacyTimeoutContext extends TimeoutContext { get timeoutForSocketRead(): Timeout | null { return null; } + + refresh(): void { + return; + } + + clear(): void { + return; + } } diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 406aa53ed6a..0d36998fd96 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ 
b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -4,7 +4,9 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { type CommandStartedEvent } from '../../../mongodb'; import { + type CommandSucceededEvent, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -216,12 +218,52 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('5. Blocking Iteration Methods', () => { + context('5. Blocking Iteration Methods', () => { /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an * error occurs. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 20 + } + }; + let internalClient: MongoClient; + let client: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient.db('db').dropCollection('coll'); + // Creating capped collection to be able to create tailable find cursor + const coll = await internalClient + .db('db') + .createCollection('coll', { capped: true, size: 1_000_000 }); + await coll.insertOne({ x: 1 }); + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); context('Tailable cursors', () => { /** @@ -248,6 +290,29 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ + + it.skip('send correct number of finds and getMores', async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { tailable: true, awaitData: true }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + // Check that there are no getMores sent + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(0); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + // Expect 1 find + expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); + // Expect 2 getMore + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6305)'; }); context('Change Streams', () => { @@ -272,6 +337,23 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ + it.skip('sends correct number of aggregate and getMores', async function () { + const changeStream = client.db('db').collection('coll').watch(); + const maybeError = await changeStream.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + const aggregates = commandStarted + .filter(e => e.command.aggregate != null) + .map(e => e.command); + const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); + // Expect 1 aggregate + expect(aggregates).to.have.lengthOf(1); + // Expect 1 getMore + expect(getMores).to.have.lengthOf(1); + }).skipReason = 'TODO(NODE-6305)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index a178cecc5d2..99914fa08e7 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -4,49 +4,55 @@ import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -const enabled = [ - 'override-collection-timeoutMS', - 'override-database-timeoutMS', - 'override-operation-timeoutMS', - 'retryability-legacy-timeouts', - 'retryability-timeoutMS', - 'sessions-override-operation-timeoutMS', - 'sessions-override-timeoutMS', - 'sessions-inherit-timeoutMS' -]; +const skippedSpecs = { + bulkWrite: 'TODO(NODE-6274)', + 'change-streams': 'TODO(NODE-6035)', + 'convenient-transactions': 'TODO(NODE-5687)', + 'deprecated-options': 'TODO(NODE-5689)', + 'gridfs-advanced': 'TODO(NODE-6275)', + 'gridfs-delete': 'TODO(NODE-6275)', + 'gridfs-download': 'TODO(NODE-6275)', + 'gridfs-find': 'TODO(NODE-6275)', + 'gridfs-upload': 'TODO(NODE-6275)', + 'tailable-awaitData': 'TODO(NODE-6035)', + 'tailable-non-awaitData': 'TODO(NODE-6035)' +}; -const cursorOperations = [ - 'aggregate', - 'countDocuments', - 'listIndexes', - 'createChangeStream', - 'listCollections', - 'listCollectionNames' -]; - -const bulkWriteOperations = [ - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection' -]; +const skippedTests = { + 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', + 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': + 'TODO(NODE-6305)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(NODE-6305)', + 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': + 'TODO(NODE-6274)', + 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': + 'TODO(NODE-6274)', + 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', + 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': + 'TODO(DRIVERS-2965)', + 'Non-tailable cursor lifetime remaining timeoutMS applied to 
getMore if timeoutMode is unset': + 'TODO(DRIVERS-2965)', + 'maxTimeMS value in the command is less than timeoutMS': + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' +}; describe('CSOT spec tests', function () { - const specs = loadSpecTests(join('client-side-operations-timeout')); + const specs = loadSpecTests('client-side-operations-timeout'); for (const spec of specs) { for (const test of spec.tests) { - // not one of the test suites listed in kickoff - if (!enabled.includes(spec.name)) { - test.skipReason = 'TODO(NODE-5684): Not working yet'; + if (skippedSpecs[spec.name] != null) { + test.skipReason = skippedSpecs[spec.name]; + } + if (skippedTests[test.description] != null) { + test.skipReason = skippedTests[test.description]; } - - // Cursor operation - if (test.operations.find(operation => cursorOperations.includes(operation.name))) - test.skipReason = 'TODO(NODE-5684): Not working yet'; - - if (bulkWriteOperations.includes(test.description)) - test.skipReason = - 'TODO(NODE-6274): update test runner to check errorResponse field of MongoBulkWriteError in isTimeoutError assertion'; } } + runUnifiedSuite(specs, (test, configuration) => { const sessionCSOTTests = ['timeoutMS applied to withTransaction']; if ( @@ -59,3 +65,10 @@ describe('CSOT spec tests', function () { return false; }); }); + +describe('CSOT modified spec tests', function () { + const specs = loadSpecTests( + join('..', 'integration', 'client-side-operations-timeout', 'unified-csot-node-specs') + ); + runUnifiedSuite(specs); +}); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index cc767c1d80a..f5ada7eef9f 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,6 @@ /* Anything javascript specific relating to timeouts */ +import { setTimeout } from 'node:timers/promises'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -7,6 +9,9 @@ import { BSON, type ClientSession, type Collection, + type CommandFailedEvent, + type CommandStartedEvent, + type CommandSucceededEvent, Connection, type Db, type FindCursor, @@ -18,7 +23,9 @@ import { } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; -describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { +const metadata = { requires: { mongodb: '>=4.4' } }; + +describe('CSOT driver tests', metadata, () => { describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; @@ -171,8 +178,8 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { describe('server-side maxTimeMS errors are transformed', () => { let client: MongoClient; - let commandsSucceeded; - let commandsFailed; + let commandsSucceeded: CommandSucceededEvent[]; + let commandsFailed: CommandFailedEvent[]; beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); @@ -221,18 +228,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command failed', async () => { - const error = await client - .db() - .command({ ping: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); 
- expect(error.cause).to.have.property('code', 50); - - expect(commandsFailed).to.have.lengthOf(1); - expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command failed', + metadata, + async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { @@ -267,18 +278,22 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { afterEach(() => sinon.restore()); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client - .db('admin') - .command({ giveMeWriteErrors: 1 }) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); - }); + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + } + ); }); describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { @@ -306,22 +321,266 @@ describe('CSOT driver tests', { requires: { mongodb: '>=4.4' } }, () => { await client.db('admin').command({ ...failpoint, mode: 'off' }); }); - it('throws a MongoOperationTimeoutError error and emits command succeeded', async () => { - const error = await client + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + } + ); + }); + }); + + describe('Non-Tailable cursors', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await 
internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('ITERATION mode', () => { + context('when executing an operation', () => { + it( + 'must apply the configured timeoutMS to the initial operation execution', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 3, timeoutMode: 'iteration', timeoutMS: 10 }) + .limit(3); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + + it('refreshes the timeout for any getMores', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + + // Iterating over 3 documents in the collection, each artificially taking ~50 ms due to failpoint. If timeoutMS is not refreshed, then we'd expect to error + for await (const doc of cursor) { + expect(doc).to.deep.equal({ x: 1 }); + } + + const finds = commandSucceeded.filter(ev => ev.commandName === 'find'); + const getMores = commandSucceeded.filter(ev => ev.commandName === 'getMore'); + + expect(finds).to.have.length(1); // Expecting 1 find + expect(getMores).to.have.length(3); // Expecting 3 getMores (including final empty getMore) + }); + + it( + 'does not append a maxTimeMS to the original command or getMores', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .project({ _id: 0 }); + await cursor.toArray(); + + expect(commandStarted).to.have.length.gte(3); // Find and 2 getMores + expect( + commandStarted.filter(ev => { + return ( + ev.command.find != null && + ev.command.getMore != null && + ev.command.maxTimeMS != null + ); + }) + ).to.have.lengthOf(0); + } + ); + }); + }); + + context('LIFETIME mode', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true }); + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient .db() - 
.collection('a') - .insertOne({}) - .catch(error => error); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(error.cause).to.be.instanceOf(MongoServerError); - expect(error.cause).to.have.nested.property('writeConcernError.code', 50); - - expect(commandsSucceeded).to.have.lengthOf(1); - expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + context('when executing a next call', () => { + context( + 'when there are documents available from previously retrieved batch and timeout has expired', + () => { + it('returns documents without error', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.be.gt(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.not.be.instanceOf(MongoOperationTimeoutError); + expect(docOrErr).to.be.deep.equal({ x: 1 }); + }); + } + ); + context('when a getMore is required and the timeout has expired', () => { + it('throws a MongoOperationTimeoutError', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + + .project({ _id: 0 }); + + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.equal(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + it('does not apply maxTimeMS to a getMore', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 1000 }) + .project({ _id: 0 }); + + for await (const _doc of cursor) { + // Ignore _doc + } + + const getMores = commandStarted + .filter(ev => ev.command.getMore != null) + .map(ev => ev.command); + expect(getMores.length).to.be.gt(0); + + for (const getMore of getMores) { + expect(getMore.maxTimeMS).to.not.exist; + } + }); }); }); }); + describe.skip('Tailable non-awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe.skip('Tailable awaitData cursors').skipReason = + 'TODO(NODE-6305): implement CSOT for Tailable cursors'; + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json new file mode 100644 index 00000000000..dd6fcb2cf84 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json @@ -0,0 +1,153 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + 
"initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1500 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 500 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/client-side-operations-timeout/README.md b/test/spec/client-side-operations-timeout/README.md new file mode 100644 index 00000000000..a960c2de219 --- /dev/null +++ b/test/spec/client-side-operations-timeout/README.md @@ -0,0 +1,661 @@ +# Client Side Operations Timeouts Tests + +______________________________________________________________________ + +## Introduction + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +## Spec Tests + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of +them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute these +tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another +with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and +`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters. + +## Prose Tests + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). 
Any fail points set during a
+test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests
+MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to
+listen for `command_started` events.
+
+### 1. Multi-batch inserts
+
+This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an
+exceedingly long time on replica sets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow
+for differing bulk encoding performance.
+
+1. Using `internalClient`, drop the `db.coll` collection.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 2
+       },
+       data: {
+           failCommands: ["insert"],
+           blockConnection: true,
+           blockTimeMS: 1010
+       }
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`.
+
+4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call.
+
+   - Expect this to fail with a timeout error.
+
+5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call.
+
+### 2. maxTimeMS is not set for commands sent to mongocryptd
+
+This test MUST only be run against enterprise server versions 4.2 and higher.
+
+1. Launch a mongocryptd process on port 23000.
+2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`.
+3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database.
+4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field.
+
+### 3. ClientEncryption
+
+Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY`
+refers to the following base64:
+
+```javascript
+Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk
+```
+
+For each test, perform the following setup:
+
+1. Using `internalClient`, drop and create the `keyvault.datakeys` collection.
+
+2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`.
+
+3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). Configure this
+   object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map:
+
+   ```javascript
+   {
+       "local": { "key": <LOCAL_MASTERKEY> }
+   }
+   ```
+
+#### createDataKey
+
+1. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 1
+       },
+       data: {
+           failCommands: ["insert"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+2. Call `clientEncryption.createDataKey()` with the `local` KMS provider.
+
+   - Expect this to fail with a timeout error.
+
+3. Verify that an `insert` command was executed against the `keyvault.datakeys` collection as part of the `createDataKey` call.
+
+#### encrypt
+
+1. Call `clientEncryption.createDataKey()` with the `local` KMS provider.
+
+   - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 1
+       },
+       data: {
+           failCommands: ["find"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+3. 
Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`. + + - Expect this to fail with a timeout error. + +4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. + +#### decrypt + +1. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`. + +2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`. + + - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`. + +3. Close and re-create the `keyVaultClient` and `clientEncryption` objects. + +4. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +5. Call `clientEncryption.decrypt()` with the value `encrypted`. + + - Expect this to fail with a timeout error. + +6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. + +### 4. Background Connection Pooling + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields +(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for +some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are +not published within that time. + +#### timeoutMS used for handshake commands + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 10 + - `appName` of `timeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published. + +#### timeoutMS is refreshed for each handshake command + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 20 + - `appName` of `refreshTimeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published. + +### 5. Blocking Iteration Methods + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an +error occurs. + +#### Tailable cursors + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`. + +3. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`. + + - Expect this to succeed and return a cursor with a non-zero ID. + +6. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command. + +7. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the + test. + +#### Change Streams + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +4. Using `client`, use the `watch` helper to create a change stream against `db.coll`. + + - Expect this to succeed and return a change stream with a non-zero ID. + +5. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during + the test. + +### 6. GridFS - Upload + +Tests in this section MUST only be run against server versions 4.4 and higher. + +#### uploads via openUploadStream can be timed out + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload a single `0x12` byte. + +7. Call `uploadStream.close()` to flush the stream and insert chunks. + + - Expect this to fail with a timeout error. + +#### Aborting an upload stream can be timed out + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with + `chunkSizeBytes=2`. + +5. 
Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`. + +7. Call `uploadStream.abort()`. + + - Expect this to fail with a timeout error. + +### 7. GridFS - Download + +This test MUST only be run against server versions 4.4 and higher. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, insert the following document into the `db.fs.files` collection: + + ```javascript + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream + (referred to as `downloadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +7. Read from the `downloadStream`. + + - Expect this to fail with a timeout error. + +8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against + `db.fs.chunks`. + +### 8. Server Selection + +#### serverSelectionTimeoutMS honored if timeoutMS is not set + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. +2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0 + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. 
a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +### 9. endSession + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run +three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via +the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the +`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient + (referred to as `session`). + +4. Execute the following code: + + ```typescript + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + ``` + +5. Using `session`, execute `session.end_session` + + - Expect this to fail with a timeout error after no more than 15ms. + +### 10. Convenient Transactions + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +#### timeoutMS is refreshed for abortTransaction if the callback fails + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession + derived from that MongoClient (referred to as `session`). + +4. Using `session`, execute a `withTransaction` operation with the following callback: + + ```typescript + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + ``` + +5. Expect the previous `withTransaction` call to fail with a timeout error. + +6. 
Verify that the following events were published during the `withTransaction` call: + + 1. `command_started` and `command_failed` events for an `insert` command. + 2. `command_started` and `command_failed` events for an `abortTransaction` command. + +### 11. Multi-batch bulkWrites + +This test MUST only run against server versions 8.0+. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["bulkWrite"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + in the response. + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +5. Create a list of write models (referred to as `models`) with the following write model repeated + (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + + ```json + InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + } + ``` + +6. Call `bulkWrite` on `client` with `models`. + + - Expect this to fail with a timeout error. + +7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. + +## Unit Tests + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set. +- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a + server has been selected. +- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server + has been selected. +- If a new connection is required to execute an operation, + `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment. +- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should + apply to HTTP requests against OCSP responders. +- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts. +- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment. 
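+
+The unit-test bullets above repeatedly apply a `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` rule.
+The following TypeScript sketch illustrates that arithmetic only; `csotMin` and `socketConnectTimeout` are
+illustrative helper names for this example (not necessarily the driver's internal API), and a value of `0` is treated
+as "no timeout":
+
+```typescript
+// Illustrative sketch of the min() rules listed above.
+// A duration of 0 means "infinite" under CSOT, so it never wins the min.
+function csotMin(duration: number, fallback: number): number {
+  if (duration === 0) return fallback;
+  if (fallback === 0) return duration;
+  return Math.min(duration, fallback);
+}
+
+// Cap socket establishment for a new connection. When timeoutMS is unset there is no
+// computedServerSelectionTimeout, so connectTimeoutMS applies on its own.
+function socketConnectTimeout(remainingTimeMS: number | null, connectTimeoutMS: number): number {
+  if (remainingTimeMS == null) return connectTimeoutMS;
+  return csotMin(remainingTimeMS, connectTimeoutMS);
+}
+
+socketConnectTimeout(700, 10_000); // 700  -- remaining CSOT budget is smaller, so it applies
+socketConnectTimeout(null, 10_000); // 10000 -- timeoutMS unset, connectTimeoutMS applies alone
+socketConnectTimeout(700, 0); // 700   -- connectTimeoutMS=0 (no socket timeout), remaining budget applies
+```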
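+
+For reference, prose test 1 ("Multi-batch inserts") could be written roughly as follows against the driver's existing
+mocha/chai harness. This is a hedged sketch, not the prose-test implementation added by this patch:
+`this.configuration.newClient` and the `'../../mongodb'` import mirror the test files elsewhere in this change, while
+the event-counting approach, `metadata` shape, and variable names are illustrative assumptions.
+
+```typescript
+import { expect } from 'chai';
+
+import { type MongoClient, MongoOperationTimeoutError } from '../../mongodb';
+
+const metadata = { requires: { mongodb: '>=4.4', topology: ['single'] } };
+
+describe('1. Multi-batch inserts', () => {
+  let internalClient: MongoClient;
+  let client: MongoClient;
+  let insertsStarted: number;
+
+  beforeEach(async function () {
+    internalClient = this.configuration.newClient();
+    await internalClient.db('db').dropCollection('coll').catch(() => null);
+    // Block the first two insert commands for just over one second each.
+    await internalClient.db('admin').command({
+      configureFailPoint: 'failCommand',
+      mode: { times: 2 },
+      data: { failCommands: ['insert'], blockConnection: true, blockTimeMS: 1010 }
+    });
+
+    client = this.configuration.newClient(undefined, { timeoutMS: 2000, monitorCommands: true });
+    insertsStarted = 0;
+    client.on('commandStarted', ev => {
+      if (ev.commandName === 'insert') insertsStarted += 1;
+    });
+  });
+
+  afterEach(async function () {
+    await internalClient.db('admin').command({ configureFailPoint: 'failCommand', mode: 'off' });
+    await internalClient.close();
+    await client.close();
+  });
+
+  it('times out the insertMany and sends exactly two insert commands', metadata, async function () {
+    // 50 one-megabyte documents forces the bulk write to split across multiple batches.
+    const docs = Array.from({ length: 50 }, () => ({ a: 'b'.repeat(1024 * 1024) }));
+    const error = await client
+      .db('db')
+      .collection('coll')
+      .insertMany(docs)
+      .catch(error => error);
+
+    // The prose test only requires "a timeout error"; depending on NODE-6274 this may surface as a
+    // MongoOperationTimeoutError directly or as a bulk-write error that wraps one.
+    const isTimeout =
+      error instanceof MongoOperationTimeoutError ||
+      error?.cause instanceof MongoOperationTimeoutError;
+    expect(isTimeout, `expected a timeout error, got: ${error}`).to.be.true;
+
+    expect(insertsStarted).to.equal(2);
+  });
+});
+```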
diff --git a/test/spec/client-side-operations-timeout/change-streams.json b/test/spec/client-side-operations-timeout/change-streams.json index aef77bb452d..8cffb08e267 100644 --- a/test/spec/client-side-operations-timeout/change-streams.json +++ b/test/spec/client-side-operations-timeout/change-streams.json @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/spec/client-side-operations-timeout/change-streams.yml b/test/spec/client-side-operations-timeout/change-streams.yml index b2a052d01b2..c813be035ac 100644 --- a/test/spec/client-side-operations-timeout/change-streams.yml +++ b/test/spec/client-side-operations-timeout/change-streams.yml @@ -67,12 +67,12 @@ tests: data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 55 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 50 + timeoutMS: 200 expectError: isTimeoutError: true expectEvents: @@ -142,12 +142,12 @@ tests: data: failCommands: ["aggregate", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 maxAwaitTimeMS: 1 saveResultAsEntity: &changeStream changeStream @@ -171,16 +171,16 @@ tests: maxTimeMS: 1 # The timeout should be applied to the entire resume attempt, not individually to each command. The test creates a - # change stream with timeoutMS=20 which returns an empty initial batch and then sets a fail point to block both - # getMore and aggregate for 12ms each and fail with a resumable error. When the resume attempt happens, the getMore - # and aggregate block for longer than 20ms total, so it times out. + # change stream with timeoutMS=200 which returns an empty initial batch and then sets a fail point to block both + # getMore and aggregate for 120ms each and fail with a resumable error. When the resume attempt happens, the getMore + # and aggregate block for longer than 200ms total, so it times out. 
- description: "timeoutMS applies to full resume attempt in a next call" operations: - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - name: failPoint object: testRunner @@ -192,7 +192,7 @@ tests: data: failCommands: ["getMore", "aggregate"] blockConnection: true - blockTimeMS: 12 + blockTimeMS: 120 errorCode: 7 # HostNotFound - resumable but does not require an SDAM state change. # failCommand doesn't correctly add the ResumableChangeStreamError by default. It needs to be specified # manually here so the error is considered resumable. The failGetMoreAfterCursorCheckout fail point @@ -234,9 +234,9 @@ tests: # Specify a short maxAwaitTimeMS because otherwise the getMore on the new cursor will wait for 1000ms and # time out. maxAwaitTimeMS: 1 - timeoutMS: 100 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - # Block getMore for 150ms to force the next() call to time out. + # Block getMore for 250ms to force the next() call to time out. - name: failPoint object: testRunner arguments: @@ -247,7 +247,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 150 + blockTimeMS: 250 # The original aggregate didn't return any events so this should do a getMore and return a timeout error. - name: iterateUntilDocumentOrError object: *changeStream @@ -290,7 +290,7 @@ tests: collection: *collectionName # The timeoutMS value should be refreshed for getMore's. This is a failure test. The createChangeStream operation - # sets timeoutMS=10 and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # sets timeoutMS=200 and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -303,12 +303,12 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 10 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream # The first iteration should do a getMore - name: iterateUntilDocumentOrError diff --git a/test/spec/client-side-operations-timeout/close-cursors.json b/test/spec/client-side-operations-timeout/close-cursors.json index 1361971c4ce..79b0de7b6aa 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.json +++ b/test/spec/client-side-operations-timeout/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/spec/client-side-operations-timeout/close-cursors.yml b/test/spec/client-side-operations-timeout/close-cursors.yml index db26e79ca31..c4c4ea0acda 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.yml +++ 
b/test/spec/client-side-operations-timeout/close-cursors.yml @@ -46,13 +46,13 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 50 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor # Iterate the cursor three times. The third should do a getMore, which should fail with a timeout error. - name: iterateUntilDocumentOrError @@ -99,18 +99,18 @@ tests: data: failCommands: ["killCursors"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor - name: close object: *cursor arguments: - timeoutMS: 40 + timeoutMS: 400 expectEvents: - client: *client events: @@ -120,7 +120,7 @@ tests: commandName: find - commandStartedEvent: command: - killCursors: *collection + killCursors: *collectionName maxTimeMS: { $$type: ["int", "long"] } commandName: killCursors - commandSucceededEvent: diff --git a/test/spec/client-side-operations-timeout/command-execution.json b/test/spec/client-side-operations-timeout/command-execution.json index b9b306c7fb6..aa9c3eb23f3 100644 --- a/test/spec/client-side-operations-timeout/command-execution.json +++ b/test/spec/client-side-operations-timeout/command-execution.json @@ -3,7 +3,7 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "topologies": [ "single", "replicaset", diff --git a/test/spec/client-side-operations-timeout/command-execution.yml b/test/spec/client-side-operations-timeout/command-execution.yml index 400a90867a3..6ba0585b3ca 100644 --- a/test/spec/client-side-operations-timeout/command-execution.yml +++ b/test/spec/client-side-operations-timeout/command-execution.yml @@ -3,9 +3,8 @@ description: "timeoutMS behaves correctly during command execution" schemaVersion: "1.9" runOnRequirements: - # The appName filter cannot be used to set a fail point on connection handshakes until server version 4.9 due to - # SERVER-49220/SERVER-49336. - - minServerVersion: "4.9" + # Require SERVER-49336 for failCommand + appName on the initial handshake. + - minServerVersion: "4.4.7" # Skip load-balanced and serverless which do not support RTT measurements. 
topologies: [ single, replicaset, sharded ] serverless: forbid diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.json b/test/spec/client-side-operations-timeout/convenient-transactions.json index 07e676d5f51..3868b3026c2 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.json +++ b/test/spec/client-side-operations-timeout/convenient-transactions.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.yml b/test/spec/client-side-operations-timeout/convenient-transactions.yml index d79aa4bd058..02d48b83242 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.yml +++ b/test/spec/client-side-operations-timeout/convenient-transactions.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -49,6 +49,8 @@ tests: timeoutMS: 100 expectError: isClientError: true + expectError: + isClientError: true expectEvents: # The only operation run fails with a client-side error, so there should be no events for the client. - client: *client @@ -66,7 +68,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 300 - name: withTransaction object: *session arguments: @@ -88,9 +90,6 @@ tests: expectEvents: - client: *client events: - # Because the second insert expects an error and gets an error, it technically succeeds, so withTransaction - # will try to run commitTransaction. This will fail client-side, though, because the timeout has already - # expired, so no command is sent. 
- commandStartedEvent: commandName: insert databaseName: *databaseName @@ -103,3 +102,9 @@ tests: command: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } diff --git a/test/spec/client-side-operations-timeout/deprecated-options.json b/test/spec/client-side-operations-timeout/deprecated-options.json index 322e9449101..d3e4631ff43 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.json +++ b/test/spec/client-side-operations-timeout/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/spec/client-side-operations-timeout/deprecated-options.yml b/test/spec/client-side-operations-timeout/deprecated-options.yml index 461ba6ab139..582a8983ae2 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.yml +++ b/test/spec/client-side-operations-timeout/deprecated-options.yml @@ -1,4 +1,4 @@ -description: "operations ignore deprected timeout options if timeoutMS is set" +description: "operations ignore deprecated timeout options if timeoutMS is set" schemaVersion: "1.9" diff --git a/test/spec/client-side-operations-timeout/gridfs-advanced.yml b/test/spec/client-side-operations-timeout/gridfs-advanced.yml index bc788bacc35..f6c37e165b2 100644 --- a/test/spec/client-side-operations-timeout/gridfs-advanced.yml +++ b/test/spec/client-side-operations-timeout/gridfs-advanced.yml @@ -119,7 +119,7 @@ tests: update: *filesCollectionName maxTimeMS: { $$type: ["int", "long"] } - # Tests for the "drop" opration. Any tests that might result in multiple commands being sent do not have expectEvents + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents # assertions as these assertions reduce test robustness and can cause flaky failures. 
- description: "timeoutMS can be overridden for drop" diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.json b/test/spec/client-side-operations-timeout/non-tailable-cursors.json index 0a5448a6bb2..291c6e72aa1 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.json +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml index 8cd953dec45..29037b4c0a3 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -53,7 +53,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -86,14 +86,14 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 - # Run a find with timeoutMS=20 and batchSize=1 to force two batches, which will cause a find and a getMore to be - # sent. Both will block for 15ms so together they will go over the timeout. + blockTimeMS: 125 + # Run a find with timeoutMS=200 and batchSize=1 to force two batches, which will cause a find and a getMore to be + # sent. Both will block for 125ms, so together they will go over the timeout. - name: find object: *collection arguments: filter: {} - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -127,13 +127,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: find object: *collection arguments: filter: {} timeoutMode: cursorLifetime - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -168,7 +168,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -187,8 +187,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. 
Neither - # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=20 and both - # "find" and "getMore" commands are blocked for 15ms each. Neither exceeds the timeout, so iteration succeeds. + # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=200 and both + # "find" and "getMore" commands are blocked for 125ms each. Neither exceeds the timeout, so iteration succeeds. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - success" operations: - name: failPoint @@ -201,13 +201,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 125 - name: find object: *collection arguments: filter: {} timeoutMode: iteration - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectEvents: - client: *client @@ -227,8 +227,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither - # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=10 and "getMore" - # commands are blocked for 15ms, causing iteration to fail with a timeout error. + # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=200 and "getMore" + # commands are blocked for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure" operations: - name: failPoint @@ -241,7 +241,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json index a28dbd26854..9daad260ef3 100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json @@ -108,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -198,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -327,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -419,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -546,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -634,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -760,6 +790,11 @@ }, { "description": "operation is retried multiple 
times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -851,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -982,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1075,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1203,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1291,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1417,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1508,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1639,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1732,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1868,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1964,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2095,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2183,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2303,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2390,6 +2495,11 @@ }, 
{ "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2512,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2600,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2730,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2825,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2955,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3043,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3166,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3254,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3377,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3465,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3588,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3676,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3799,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3887,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": 
"failPoint", @@ -4010,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4098,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4218,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4305,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4428,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4517,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4641,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4729,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4852,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4940,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5060,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5147,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5269,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5357,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml index 039f7ca42ef..6f47d6c2e42 
100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml @@ -84,6 +84,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -125,6 +127,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -191,6 +195,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -233,6 +239,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -299,6 +307,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -340,6 +350,8 @@ tests: delete: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -406,6 +418,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -448,6 +462,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -515,6 +531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -557,6 +575,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -623,6 +643,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner 
@@ -664,6 +686,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -730,6 +754,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -772,6 +798,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -839,6 +867,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -881,6 +911,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -949,6 +981,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -992,6 +1026,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1059,6 +1095,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1100,6 +1138,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1163,6 +1203,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1203,6 +1245,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1267,6 +1311,8 @@ tests: expectError: isTimeoutError: true - 
description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1308,6 +1354,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1373,6 +1421,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1414,6 +1464,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1479,6 +1531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1520,6 +1574,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1585,6 +1641,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1626,6 +1684,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1691,6 +1751,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1732,6 +1794,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1797,6 +1861,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1838,6 +1904,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1903,6 +1971,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1944,6 +2014,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2009,6 +2081,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2050,6 +2124,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2113,6 +2189,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2153,6 +2231,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2218,6 +2298,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2260,6 +2342,8 @@ tests: distinct: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2326,6 +2410,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2367,6 +2453,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2432,6 +2520,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: 
testRunner @@ -2473,6 +2563,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2536,6 +2628,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2576,6 +2670,8 @@ tests: listIndexes: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2640,6 +2736,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2681,6 +2779,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.json b/test/spec/client-side-operations-timeout/tailable-awaitData.json index 6da85c77835..535fb692434 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -130,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +188,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +199,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +272,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +283,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +354,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-awaitData.yml index 422c6fb5370..52b9b3b456c 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -83,7 +83,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 300 - name: find object: *collection arguments: @@ -117,13 +117,13 @@ tests: data: failCommands: ["find", "getMore"] 
blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate twice to force a getMore. The first iteration will return the document from the first batch and the @@ -165,13 +165,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 maxAwaitTimeMS: 1 saveResultAsEntity: &tailableCursor tailableCursor @@ -199,8 +199,8 @@ tests: collection: *collectionName maxTimeMS: 1 - # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from - # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=200 from + # the collection and the getMore blocks for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -213,7 +213,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json index 34ee6609636..e88230e4f7a 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml index 766b46e658b..eb75deaa65c 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -59,7 +59,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -96,13 +96,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailable - timeoutMS: 20 + timeoutMS: 200 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the @@ -131,7 +131,7 @@ tests: 
maxTimeMS: { $$exists: false } # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. The - # find inherits timeoutMS=10 from the collection and the getMore command blocks for 15ms, causing iteration to fail + # find inherits timeoutMS=200 from the collection and the getMore command blocks for 250ms, causing iteration to fail # with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: @@ -145,7 +145,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 5b5b7040698..31414fa4664 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -215,7 +215,8 @@ operations.set('close', async ({ entities, operation }) => { /* eslint-disable no-empty */ try { const cursor = entities.getEntity('cursor', operation.object); - await cursor.close(); + const timeoutMS = operation.arguments?.timeoutMS; + await cursor.close({ timeoutMS }); return; } catch {} @@ -787,7 +788,9 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc const { command, ...opts } = operation.arguments!; const cursor = db.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); diff --git a/test/unit/cursor/aggregation_cursor.test.ts b/test/unit/cursor/aggregation_cursor.test.ts index 32ca4125ff4..82ae18745b0 100644 --- a/test/unit/cursor/aggregation_cursor.test.ts +++ b/test/unit/cursor/aggregation_cursor.test.ts @@ -1,6 +1,12 @@ import { expect } from 'chai'; -import { type AggregationCursor, MongoClient } from '../../mongodb'; +import { + AggregationCursor, + CursorTimeoutMode, + MongoAPIError, + MongoClient, + ns +} from '../../mongodb'; describe('class AggregationCursor', () => { let client: MongoClient; @@ -126,6 +132,38 @@ describe('class AggregationCursor', () => { }); context('when addStage, bespoke stage methods, or array is used to construct pipeline', () => { + context('when CSOT is enabled', () => { + let aggregationCursor: AggregationCursor; + before(function () { + aggregationCursor = client + .db('test') + .collection('test') + .aggregate([], { timeoutMS: 100, timeoutMode: CursorTimeoutMode.ITERATION }); + }); + + context('when a $out stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $out: 'test' }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $merge stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $merge: {} }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $out stage is added with .out()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.out('test'); + }).to.throw(MongoAPIError); + }); + }); + }); + it('sets deeply identical aggregations pipelines', () => { const collection = client.db().collection('test'); @@ -157,4 +195,31 @@ describe('class AggregationCursor', () => { expect(builderGenericStageCursor.pipeline).to.deep.equal(expectedPipeline); }); }); + + 
describe('constructor()', () => { + context('when CSOT is enabled', () => { + context('when timeoutMode=ITERATION and a $out stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $out: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + context('when timeoutMode=ITERATION and a $merge stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $merge: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + }); + }); }); From 4f8e7c9b0de5148887a79fb8be930a69db16492d Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 12 Sep 2024 15:24:39 -0400 Subject: [PATCH 100/136] fix(NODE-6374): MongoOperationTimeoutError inherits MongoRuntimeError (#4237) --- etc/notes/errors.md | 6 +++++- src/error.ts | 21 ++++++++++++++++++--- test/unit/error.test.ts | 20 ++++++++++++++++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/etc/notes/errors.md b/etc/notes/errors.md index d0f8e6b6e95..114bc1b2e2c 100644 --- a/etc/notes/errors.md +++ b/etc/notes/errors.md @@ -67,7 +67,7 @@ Children of `MongoError` include: ### `MongoDriverError` This class represents errors which originate in the driver itself or when the user incorrectly uses the driver. This class should **never** be directly instantiated. -Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError) and [**`MongoRuntimeError`**](#MongoRuntimeError). +Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError), [**`MongoRuntimeError`**](#MongoRuntimeError) and [**`MongoOperationTimeoutError`**](#MongoOperationTimeoutError). ### `MongoAPIError` @@ -109,6 +109,10 @@ This class should **never** be directly instantiated. | **MongoGridFSChunkError** | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. | | **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. | +### `MongoOperationTimeoutError` + +- TODO(NODE-5688): Add MongoOperationTimeoutError documentation + ### MongoUnexpectedServerResponseError Intended for the scenario where the MongoDB returns an unexpected response in relation to some state the driver is in. diff --git a/src/error.ts b/src/error.ts index 2b973cccc38..3853b7e19c2 100644 --- a/src/error.ts +++ b/src/error.ts @@ -314,7 +314,7 @@ export class MongoAPIError extends MongoDriverError { /** * An error generated when the driver encounters unexpected input - * or reaches an unexpected/invalid internal state + * or reaches an unexpected/invalid internal state. * * @privateRemarks * Should **never** be directly instantiated. @@ -861,9 +861,24 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } /** - * @internal + * @public + * @category Error + * + * This error is thrown when an operation could not be completed within the specified `timeoutMS`. + * TODO(NODE-5688): expand this documentation. + * + * @example + * ```ts + * try { + * await blogs.insertOne(blogPost, { timeoutMS: 60_000 }) + * } catch (error) { + * if (error instanceof MongoOperationTimeoutError) { + * console.log(`Oh no! 
writer's block!`, error); + * } + * } + * ``` */ -export class MongoOperationTimeoutError extends MongoRuntimeError { +export class MongoOperationTimeoutError extends MongoDriverError { override get name(): string { return 'MongoOperationTimeoutError'; } diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index bdc049cbc4f..dca792bd382 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -14,12 +14,15 @@ import { LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE, LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE, MONGODB_ERROR_CODES, + MongoDriverError, MongoError, MongoErrorLabel, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, + MongoRuntimeError, MongoServerError, MongoSystemError, MongoWriteConcernError, @@ -173,6 +176,23 @@ describe('MongoErrors', () => { }); }); + describe('class MongoOperationTimeoutError', () => { + it('has a name property equal to MongoOperationTimeoutError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.have.property('name', 'MongoOperationTimeoutError'); + }); + + it('is instanceof MongoDriverError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.be.instanceOf(MongoDriverError); + }); + + it('is not instanceof MongoRuntimeError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.not.be.instanceOf(MongoRuntimeError); + }); + }); + describe('MongoMissingDependencyError#constructor', () => { context('when options.cause is set', () => { it('attaches the cause property to the instance', () => { From 8b9eeefceaa3afecd32fd024334d0fc151a343d6 Mon Sep 17 00:00:00 2001 From: Warren James Date: Thu, 12 Sep 2024 16:02:50 -0400 Subject: [PATCH 101/136] test: remove empty skipped context blocks (#4238) --- .../client-side-operations-timeout/node_csot.test.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index f5ada7eef9f..56127cc8ace 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -576,11 +576,6 @@ describe('CSOT driver tests', metadata, () => { }); }); - describe.skip('Tailable non-awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe.skip('Tailable awaitData cursors').skipReason = - 'TODO(NODE-6305): implement CSOT for Tailable cursors'; - describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } From 1eb0b74532f3a3f0a953cd839403a6776214b1c0 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Tue, 17 Sep 2024 13:27:43 -0400 Subject: [PATCH 102/136] feat(NODE-5844): add iscryptd to ServerDescription (#4239) --- src/sdam/server_description.ts | 4 ++ .../server_description.test.ts | 56 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 test/integration/server-discovery-and-monitoring/server_description.test.ts diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index aadf523d722..a650c8dba97 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -75,6 +75,8 @@ export class ServerDescription { maxWriteBatchSize: number | null; /** The max bson object size. 
*/ maxBsonObjectSize: number | null; + /** Indicates server is a mongocryptd instance. */ + iscryptd: boolean; // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level $clusterTime?: ClusterTime; @@ -123,6 +125,7 @@ export class ServerDescription { this.primary = hello?.primary ?? null; this.me = hello?.me?.toLowerCase() ?? null; this.$clusterTime = hello?.$clusterTime ?? null; + this.iscryptd = Boolean(hello?.iscryptd); } get hostAddress(): HostAddress { @@ -176,6 +179,7 @@ export class ServerDescription { return ( other != null && + other.iscryptd === this.iscryptd && errorStrictEqual(this.error, other.error) && this.type === other.type && this.minWireVersion === other.minWireVersion && diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts new file mode 100644 index 00000000000..0a3c7eecbf6 --- /dev/null +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -0,0 +1,56 @@ +import { type ChildProcess, spawn } from 'node:child_process'; + +import { expect } from 'chai'; + +import { MongoClient } from '../../mongodb'; + +describe('class ServerDescription', function () { + describe('when connecting to mongocryptd', { requires: { mongodb: '>=4.4' } }, function () { + let client: MongoClient; + const mongocryptdTestPort = '27022'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}`); + }); + + afterEach(async function () { + await client?.close(); + childProcess.kill('SIGKILL'); + }); + + it('iscryptd is set to true ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.have.property('iscryptd', true); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', true); + }); + }); + + describe('when connecting to anything other than mongocryptd', function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient(); + }); + + afterEach(async function () { + await client?.close(); + }); + + it('iscryptd is set to false ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.not.have.property('iscryptd'); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', false); + }); + }); +}); From 580130d47d5c09fa3b2292e3381aa0761e4efe29 Mon Sep 17 00:00:00 2001 From: Warren James Date: Wed, 25 Sep 2024 17:43:12 -0400 Subject: [PATCH 103/136] chore: allow clientBulkWrite to use TimeoutContext (#4251) --- .../client_bulk_write/client_bulk_write.ts | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/operations/client_bulk_write/client_bulk_write.ts b/src/operations/client_bulk_write/client_bulk_write.ts index e901407cd78..26d1e7bb60f 100644 --- a/src/operations/client_bulk_write/client_bulk_write.ts +++ b/src/operations/client_bulk_write/client_bulk_write.ts @@ -2,6 +2,7 @@ import { 
MongoClientBulkWriteExecutionError, ServerType } from '../../beta'; import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { MongoDBNamespace } from '../../utils'; import { CommandOperation } from '../command'; import { Aspect, defineAspects } from '../operation'; @@ -43,7 +44,8 @@ export class ClientBulkWriteOperation extends CommandOperation { let command; @@ -52,7 +54,7 @@ export class ClientBulkWriteOperation extends CommandOperation Date: Wed, 2 Oct 2024 17:31:59 -0400 Subject: [PATCH 104/136] feat(NODE-6274): add CSOT support to bulkWrite (#4250) Co-authored-by: Bailey Pearson --- src/bulk/common.ts | 18 ++- ...ient_side_operations_timeout.prose.test.ts | 142 +++++++++++++++++- ...lient_side_operations_timeout.spec.test.ts | 7 - test/tools/unified-spec-runner/match.ts | 14 +- 4 files changed, 159 insertions(+), 22 deletions(-) diff --git a/src/bulk/common.ts b/src/bulk/common.ts index dc0bcfb513f..22012207a09 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -501,7 +501,7 @@ export function mergeBatchResults( async function executeCommands( bulkOperation: BulkOperationBase, - options: BulkWriteOptions + options: BulkWriteOptions & { timeoutContext?: TimeoutContext | null } ): Promise { if (bulkOperation.s.batches.length === 0) { return new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered); @@ -552,7 +552,11 @@ async function executeCommands( let thrownError = null; let result; try { - result = await executeOperation(bulkOperation.s.collection.client, operation); + result = await executeOperation( + bulkOperation.s.collection.client, + operation, + finalOptions.timeoutContext + ); } catch (error) { thrownError = error; } @@ -866,7 +870,11 @@ export class BulkWriteShimOperation extends AbstractOperation { return 'bulkWrite' as const; } - async execute(_server: Server, session: ClientSession | undefined): Promise { + async execute( + _server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (this.options.session == null) { // An implicit session could have been created by 'executeOperation' // So if we stick it on finalOptions here, each bulk operation @@ -874,7 +882,7 @@ export class BulkWriteShimOperation extends AbstractOperation { // an explicit session would be this.options.session = session; } - return await executeCommands(this.bulkOperation, this.options); + return await executeCommands(this.bulkOperation, { ...this.options, timeoutContext }); } } @@ -1203,7 +1211,7 @@ export abstract class BulkOperationBase { const finalOptions = { ...this.s.options, ...options }; const operation = new BulkWriteShimOperation(this, finalOptions); - return await executeOperation(this.s.collection.client, operation); + return await executeOperation(this.s.collection.client, operation, finalOptions.timeoutContext); } /** diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 0d36998fd96..e276c9bbafd 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -7,6 +7,7 @@ import * as sinon from 'sinon'; import { type CommandStartedEvent } 
from '../../../mongodb'; import { type CommandSucceededEvent, + MongoBulkWriteError, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, @@ -28,7 +29,7 @@ describe('CSOT spec prose tests', function () { await client?.close(); }); - context.skip('1. Multi-batch writes', () => { + describe('1. Multi-batch writes', { requires: { topology: 'single', mongodb: '>=4.4' } }, () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -55,6 +56,46 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. */ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it('performs two inserts which fail to complete before 2000 ms', async () => { + const inserts = []; + client.on('commandStarted', ev => inserts.push(ev)); + + const a = new Uint8Array(1000000 - 22); + const oneMBDocs = Array.from({ length: 50 }, (_, _id) => ({ _id, a })); + const error = await client + .db('db') + .collection<{ _id: number; a: Uint8Array }>('coll') + .insertMany(oneMBDocs) + .catch(error => error); + + expect(error).to.be.instanceOf(MongoBulkWriteError); + expect(error.errorResponse).to.be.instanceOf(MongoOperationTimeoutError); + expect(inserts.map(ev => ev.commandName)).to.deep.equal(['insert', 'insert']); + }); }); context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { @@ -901,4 +942,103 @@ describe('CSOT spec prose tests', function () { }); }); }); + + describe.skip( + '11. Multi-batch bulkWrites', + { requires: { mongodb: '>=8.0', serverless: 'forbid' } }, + function () { + /** + * ### 11. Multi-batch bulkWrites + * + * This test MUST only run against server versions 8.0+. This test must be skipped on Atlas Serverless. + * + * 1. Using `internalClient`, drop the `db.coll` collection. + * + * 2. Using `internalClient`, set the following fail point: + * + * @example + * ```javascript + * { + * configureFailPoint: "failCommand", + * mode: { + * times: 2 + * }, + * data: { + * failCommands: ["bulkWrite"], + * blockConnection: true, + * blockTimeMS: 1010 + * } + * } + * ``` + * + * 3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + * in the response. + * + * 4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + * + * 5. Create a list of write models (referred to as `models`) with the following write model repeated + * (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + * + * @example + * ```json + * InsertOne { + * "namespace": "db.coll", + * "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + * } + * ``` + * + * 6. Call `bulkWrite` on `client` with `models`. + * + * - Expect this to fail with a timeout error. + * + * 7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. 
+ */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['bulkWrite'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + let maxBsonObjectSize: number; + let maxMessageSizeBytes: number; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + const hello = await internalClient.db('admin').command({ hello: 1 }); + maxBsonObjectSize = hello.maxBsonObjectSize; + maxMessageSizeBytes = hello.maxMessageSizeBytes; + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it.skip('performs two bulkWrites which fail to complete before 2000 ms', async function () { + const writes = []; + client.on('commandStarted', ev => writes.push(ev)); + + const length = maxMessageSizeBytes / maxBsonObjectSize + 1; + const models = Array.from({ length }, () => ({ + namespace: 'db.coll', + name: 'insertOne' as const, + document: { a: 'b'.repeat(maxBsonObjectSize - 500) } + })); + + const error = await client.bulkWrite(models).catch(error => error); + + expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError); + expect(writes.map(ev => ev.commandName)).to.deep.equal(['bulkWrite', 'bulkWrite']); + }).skipReason = 'TODO(NODE-6403): client.bulkWrite is implemented in a follow up'; + } + ); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 99914fa08e7..c2e08cfc80a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -5,7 +5,6 @@ import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; const skippedSpecs = { - bulkWrite: 'TODO(NODE-6274)', 'change-streams': 'TODO(NODE-6035)', 'convenient-transactions': 'TODO(NODE-5687)', 'deprecated-options': 'TODO(NODE-5689)', @@ -19,18 +18,12 @@ const skippedSpecs = { }; const skippedTests = { - 'timeoutMS can be configured on a MongoClient - insertMany on collection': 'TODO(NODE-6274)', - 'timeoutMS can be configured on a MongoClient - bulkWrite on collection': 'TODO(NODE-6274)', 'timeoutMS can be configured on a MongoClient - createChangeStream on client': 'TODO(NODE-6305)', 'timeoutMS applies to whole operation, not individual attempts - createChangeStream on client': 'TODO(NODE-6305)', 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': 'TODO(NODE-6305)', - 'timeoutMS applies to whole operation, not individual attempts - insertMany on collection': - 'TODO(NODE-6274)', - 'timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection': - 'TODO(NODE-6274)', 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', 'Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': 'TODO(DRIVERS-2965)', diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 662746b4591..931ba1c9ecc 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -788,15 +788,11 @@ export 
function expectErrorCheck( if (expected.isTimeoutError === false) { expect(error).to.not.be.instanceof(MongoOperationTimeoutError); } else if (expected.isTimeoutError === true) { - expect(error).to.be.instanceof(MongoOperationTimeoutError); - } - - // TODO(NODE-6274): Check for MongoBulkWriteErrors that have a MongoOperationTimeoutError in their - // errorResponse field - if (expected.isTimeoutError === false) { - expect(error).to.not.be.instanceof(MongoOperationTimeoutError); - } else if (expected.isTimeoutError === true) { - expect(error).to.be.instanceof(MongoOperationTimeoutError); + if ('errorResponse' in error) { + expect(error.errorResponse).to.be.instanceof(MongoOperationTimeoutError); + } else { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } } if (expected.errorContains != null) { From c637ea8d1f789e8060bd3879fb4e22f210c5cd82 Mon Sep 17 00:00:00 2001 From: Warren James Date: Fri, 4 Oct 2024 10:52:21 -0400 Subject: [PATCH 105/136] feat(NODE-6275): Add CSOT support to GridFS (#4246) Co-authored-by: Neal Beeken Co-authored-by: Bailey Pearson --- package-lock.json | 9 +- package.json | 2 +- src/collection.ts | 10 +- src/gridfs/download.ts | 44 +++- src/gridfs/index.ts | 74 +++++-- src/gridfs/upload.ts | 191 ++++++++++++++---- src/timeout.ts | 12 ++ ...ient_side_operations_timeout.prose.test.ts | 171 +++++++++++++++- ...lient_side_operations_timeout.spec.test.ts | 5 - .../node_csot.test.ts | 167 ++++++++++++++- test/tools/unified-spec-runner/operations.ts | 37 +++- 11 files changed, 634 insertions(+), 88 deletions(-) diff --git a/package-lock.json b/package-lock.json index 1d9cebf509b..2b3a9b897aa 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.1", + "mongodb-legacy": "^6.1.2", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6440,10 +6440,11 @@ } }, "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.2.tgz", + "integrity": "sha512-oj+LLtvhhi8XuAQ8dll2BVjrnKxOo/7ylyQu0LsKmzyGcbrvzcyvFUOLC6rPhuJPOvnezh3MZ3/Sk9Tl1jpUpg==", "dev": true, + "license": "Apache-2.0", "dependencies": { "mongodb": "^6.0.0" }, diff --git a/package.json b/package.json index 2de0e1811f0..0c4c668726a 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.1", + "mongodb-legacy": "^6.1.2", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/collection.ts b/src/collection.ts index a73a5276f5f..62fa5bd4cba 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -501,12 +501,18 @@ export class Collection<TSchema extends Document = Document> { */ async findOne(): Promise<WithId<TSchema> | null>; async findOne(filter: Filter<TSchema>): Promise<WithId<TSchema> | null>; - async findOne(filter: Filter<TSchema>, options: FindOptions): Promise<WithId<TSchema> | null>; + async findOne( + filter: Filter<TSchema>, + options: Omit<FindOptions, 'timeoutMode'> + ): Promise<WithId<TSchema> | null>; // allow an override of the schema.
async findOne<T = TSchema>(): Promise<T | null>; async findOne<T = TSchema>(filter: Filter<TSchema>): Promise<T | null>; - async findOne<T = TSchema>(filter: Filter<TSchema>, options?: FindOptions): Promise<T | null>; + async findOne<T = TSchema>( + filter: Filter<TSchema>, + options?: Omit<FindOptions, 'timeoutMode'> + ): Promise<T | null>; async findOne( filter: Filter<TSchema> = {}, diff --git a/src/gridfs/download.ts b/src/gridfs/download.ts index 06dda0a92ba..19651b885ea 100644 --- a/src/gridfs/download.ts +++ b/src/gridfs/download.ts @@ -2,6 +2,7 @@ import { Readable } from 'stream'; import type { Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { FindCursor } from '../cursor/find_cursor'; import { MongoGridFSChunkError, @@ -12,6 +13,7 @@ import { import type { FindOptions } from '../operations/find'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; import type { Callback } from '../utils'; import type { GridFSChunk } from './upload'; @@ -28,7 +30,7 @@ export interface GridFSBucketReadStreamOptions { * to be returned by the stream. `end` is non-inclusive */ end?: number; - /** @internal TODO(NODE-5688): make this public */ + /** @public */ timeoutMS?: number; } @@ -98,8 +100,10 @@ export interface GridFSBucketReadStreamPrivate { skip?: number; start: number; end: number; + timeoutMS?: number; }; readPreference?: ReadPreference; + timeoutContext?: CSOTTimeoutContext; } /** @@ -148,7 +152,11 @@ export class GridFSBucketReadStream extends Readable { end: 0, ...options }, - readPreference + readPreference, + timeoutContext: + options?.timeoutMS != null + ? new CSOTTimeoutContext({ timeoutMS: options.timeoutMS, serverSelectionTimeoutMS: 0 }) + : undefined }; } @@ -196,7 +204,8 @@ export class GridFSBucketReadStream extends Readable { async abort(): Promise<void> { this.push(null); this.destroy(); - await this.s.cursor?.close(); + const remainingTimeMS = this.s.timeoutContext?.getRemainingTimeMSOrThrow(); + await this.s.cursor?.close({ timeoutMS: remainingTimeMS }); } } @@ -352,7 +361,22 @@ function init(stream: GridFSBucketReadStream): void { filter['n'] = { $gte: skip }; } } - stream.s.cursor = stream.s.chunks.find(filter).sort({ n: 1 }); + + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + return stream.destroy(error); + } + + stream.s.cursor = stream.s.chunks + .find(filter, { + timeoutMode: stream.s.options.timeoutMS != null ?
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .sort({ n: 1 }); if (stream.s.readPreference) { stream.s.cursor.withReadPreference(stream.s.readPreference); @@ -371,6 +395,18 @@ function init(stream: GridFSBucketReadStream): void { return; }; + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + if (!stream.destroyed) stream.destroy(error); + return; + } + + findOneOptions.timeoutMS = remainingTimeMS; + stream.s.files.findOne(stream.s.filter, findOneOptions).then(handleReadResult, error => { if (stream.destroyed) return; stream.destroy(error); diff --git a/src/gridfs/index.ts b/src/gridfs/index.ts index 51c32b7a01c..de114e5e597 100644 --- a/src/gridfs/index.ts +++ b/src/gridfs/index.ts @@ -2,10 +2,12 @@ import type { ObjectId } from '../bson'; import type { Collection } from '../collection'; import type { FindCursor } from '../cursor/find_cursor'; import type { Db } from '../db'; -import { MongoRuntimeError } from '../error'; +import { MongoOperationTimeoutError, MongoRuntimeError } from '../error'; import { type Filter, TypedEventEmitter } from '../mongo_types'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; +import { resolveOptions } from '../utils'; import { WriteConcern, type WriteConcernOptions } from '../write_concern'; import type { FindOptions } from './../operations/find'; import { @@ -48,6 +50,7 @@ export interface GridFSBucketPrivate { chunkSizeBytes: number; readPreference?: ReadPreference; writeConcern: WriteConcern | undefined; + timeoutMS?: number; }; _chunksCollection: Collection; _filesCollection: Collection; @@ -81,11 +84,11 @@ export class GridFSBucket extends TypedEventEmitter { constructor(db: Db, options?: GridFSBucketOptions) { super(); this.setMaxListeners(0); - const privateOptions = { + const privateOptions = resolveOptions(db, { ...DEFAULT_GRIDFS_BUCKET_OPTIONS, ...options, writeConcern: WriteConcern.fromOptions(options) - }; + }); this.s = { db, options: privateOptions, @@ -109,7 +112,10 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, options); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options + }); } /** @@ -122,7 +128,11 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, { ...options, id }); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options, + id + }); } /** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. 
*/ @@ -135,7 +145,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { _id: id }, - options + { timeoutMS: this.s.options.timeoutMS, ...options } ); } @@ -144,11 +154,27 @@ export class GridFSBucket extends TypedEventEmitter { * * @param id - The id of the file doc */ - async delete(id: ObjectId): Promise { - const { deletedCount } = await this.s._filesCollection.deleteOne({ _id: id }); + async delete(id: ObjectId, options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.options.serverSelectionTimeoutMS + }); + } + const { deletedCount } = await this.s._filesCollection.deleteOne( + { _id: id }, + { timeoutMS: timeoutContext?.remainingTimeMS } + ); + + const remainingTimeMS = timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(`Timed out after ${timeoutMS}ms`); // Delete orphaned chunks before returning FileNotFound - await this.s._chunksCollection.deleteMany({ files_id: id }); + await this.s._chunksCollection.deleteMany({ files_id: id }, { timeoutMS: remainingTimeMS }); if (deletedCount === 0) { // TODO(NODE-3483): Replace with more appropriate error @@ -188,7 +214,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { filename }, - { ...options, sort, skip } + { timeoutMS: this.s.options.timeoutMS, ...options, sort, skip } ); } @@ -198,18 +224,36 @@ export class GridFSBucket extends TypedEventEmitter { * @param id - the id of the file to rename * @param filename - new name for the file */ - async rename(id: ObjectId, filename: string): Promise { + async rename(id: ObjectId, filename: string, options?: { timeoutMS: number }): Promise { const filter = { _id: id }; const update = { $set: { filename } }; - const { matchedCount } = await this.s._filesCollection.updateOne(filter, update); + const { matchedCount } = await this.s._filesCollection.updateOne(filter, update, options); if (matchedCount === 0) { throw new MongoRuntimeError(`File with id ${id} not found`); } } /** Removes this bucket's files collection, followed by its chunks collection. 
*/ - async drop(): Promise { - await this.s._filesCollection.drop(); - await this.s._chunksCollection.drop(); + async drop(options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.options.serverSelectionTimeoutMS + }); + } + + if (timeoutContext) { + await this.s._filesCollection.drop({ timeoutMS: timeoutContext.remainingTimeMS }); + const remainingTimeMS = timeoutContext.getRemainingTimeMSOrThrow( + `Timed out after ${timeoutMS}ms` + ); + await this.s._chunksCollection.drop({ timeoutMS: remainingTimeMS }); + } else { + await this.s._filesCollection.drop(); + await this.s._chunksCollection.drop(); + } } } diff --git a/src/gridfs/upload.ts b/src/gridfs/upload.ts index f54d5131f66..c7544b715d8 100644 --- a/src/gridfs/upload.ts +++ b/src/gridfs/upload.ts @@ -2,7 +2,14 @@ import { Writable } from 'stream'; import { type Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; -import { MongoAPIError, MONGODB_ERROR_CODES, MongoError } from '../error'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; +import { + MongoAPIError, + MONGODB_ERROR_CODES, + MongoError, + MongoOperationTimeoutError +} from '../error'; +import { CSOTTimeoutContext } from '../timeout'; import { type Callback, squashError } from '../utils'; import type { WriteConcernOptions } from '../write_concern'; import { WriteConcern } from './../write_concern'; @@ -35,7 +42,7 @@ export interface GridFSBucketWriteStreamOptions extends WriteConcernOptions { * @deprecated Will be removed in the next major version. Add an aliases field to the metadata document instead. 
*/ aliases?: string[]; - /** @internal TODO(NODE-5688): make this public */ + /** @public */ timeoutMS?: number; } @@ -97,6 +104,8 @@ export class GridFSBucketWriteStream extends Writable { * ``` */ gridFSFile: GridFSFile | null = null; + /** @internal */ + timeoutContext?: CSOTTimeoutContext; /** * @param bucket - Handle for this stream's corresponding bucket @@ -131,14 +140,11 @@ export class GridFSBucketWriteStream extends Writable { aborted: false }; - if (!this.bucket.s.calledOpenUploadStream) { - this.bucket.s.calledOpenUploadStream = true; - - checkIndexes(this).then(() => { - this.bucket.s.checkedIndexes = true; - this.bucket.emit('index'); - }, squashError); - } + if (options.timeoutMS != null) + this.timeoutContext = new CSOTTimeoutContext({ + timeoutMS: options.timeoutMS, + serverSelectionTimeoutMS: this.bucket.s.db.client.options.serverSelectionTimeoutMS + }); } /** @@ -147,10 +153,26 @@ export class GridFSBucketWriteStream extends Writable { * The stream is considered constructed when the indexes are done being created */ override _construct(callback: (error?: Error | null) => void): void { - if (this.bucket.s.checkedIndexes) { + if (!this.bucket.s.calledOpenUploadStream) { + this.bucket.s.calledOpenUploadStream = true; + + checkIndexes(this).then( + () => { + this.bucket.s.checkedIndexes = true; + this.bucket.emit('index'); + callback(); + }, + error => { + if (error instanceof MongoOperationTimeoutError) { + return handleError(this, error, callback); + } + squashError(error); + callback(); + } + ); + } else { return process.nextTick(callback); } - this.bucket.once('index', callback); } /** @@ -194,7 +216,10 @@ export class GridFSBucketWriteStream extends Writable { } this.state.aborted = true; - await this.chunks.deleteMany({ files_id: this.id }); + const remainingTimeMS = this.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${this.timeoutContext?.timeoutMS}ms` + ); + await this.chunks.deleteMany({ files_id: this.id, timeoutMS: remainingTimeMS }); } } @@ -219,9 +244,19 @@ function createChunkDoc(filesId: ObjectId, n: number, data: Buffer): GridFSChunk async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise { const index = { files_id: 1, n: 1 }; + let remainingTimeMS; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + let indexes; try { - indexes = await stream.chunks.listIndexes().toArray(); + indexes = await stream.chunks + .listIndexes({ + timeoutMode: remainingTimeMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -239,10 +274,14 @@ async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise }); if (!hasChunksIndex) { + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); await stream.chunks.createIndex(index, { ...stream.writeConcern, background: true, - unique: true + unique: true, + timeoutMS: remainingTimeMS }); } } @@ -270,13 +309,28 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { return; } - stream.files.insertOne(gridFSFile, { writeConcern: stream.writeConcern }).then( - () => { - stream.gridFSFile = gridFSFile; - callback(); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + + stream.files + .insertOne(gridFSFile, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + stream.gridFSFile = gridFSFile; + callback(); + }, + error => { + return handleError(stream, error, callback); + } + ); return; } @@ -284,7 +338,16 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { } async function checkIndexes(stream: GridFSBucketWriteStream): Promise { - const doc = await stream.files.findOne({}, { projection: { _id: 1 } }); + let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const doc = await stream.files.findOne( + {}, + { + projection: { _id: 1 }, + timeoutMS: remainingTimeMS + } + ); if (doc != null) { // If at least one document exists assume the collection has the required index return; @@ -293,8 +356,15 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { const index = { filename: 1, uploadDate: 1 }; let indexes; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const listIndexesOptions = { + timeoutMode: remainingTimeMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }; try { - indexes = await stream.files.listIndexes().toArray(); + indexes = await stream.files.listIndexes(listIndexesOptions).toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -312,7 +382,11 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { }); if (!hasFileIndex) { - await stream.files.createIndex(index, { background: false }); + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + + await stream.files.createIndex(index, { background: false, timeoutMS: remainingTimeMS }); } await checkChunksIndex(stream); @@ -386,6 +460,18 @@ function doWrite( let doc: GridFSChunk; if (spaceRemaining === 0) { doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore)); + + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; ++outstandingRequests; @@ -393,17 +479,21 @@ function doWrite( return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - --outstandingRequests; - - if (!outstandingRequests) { - checkDone(stream, callback); + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + --outstandingRequests; + + if (!outstandingRequests) { + checkDone(stream, callback); + } + }, + error => { + return handleError(stream, error, callback); } - }, - error => handleError(stream, error, callback) - ); + ); spaceRemaining = stream.chunkSizeBytes; stream.pos = 0; @@ -420,8 +510,6 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return checkDone(stream, callback); } - ++stream.state.outstandingRequests; - // Create a new buffer to make sure the buffer isn't bigger than it needs // to be. 
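  // If a CSOT budget was configured for this upload stream, the remnant insert below
  // also runs against whatever time remains on that budget.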
const remnant = Buffer.alloc(stream.pos); @@ -433,13 +521,28 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - checkDone(stream, callback); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + checkDone(stream, callback); + }, + error => { + return handleError(stream, error, callback); + } + ); } function isAborted(stream: GridFSBucketWriteStream, callback: Callback): boolean { diff --git a/src/timeout.ts b/src/timeout.ts index f7fb3d0daa5..f694b5f4f4f 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -305,6 +305,18 @@ export class CSOTTimeoutContext extends TimeoutContext { this._serverSelectionTimeout?.clear(); this._connectionCheckoutTimeout?.clear(); } + + /** + * @internal + * Throws a MongoOperationTimeoutError if the context has expired. + * If the context has not expired, returns the `remainingTimeMS` + **/ + getRemainingTimeMSOrThrow(message?: string): number { + const { remainingTimeMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(message ?? `Expired after ${this.timeoutMS}ms`); + return remainingTimeMS; + } } /** @internal */ diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index e276c9bbafd..1b8c34633b4 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -3,15 +3,20 @@ import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; +import { Readable } from 'stream'; +import { pipeline } from 'stream/promises'; import { type CommandStartedEvent } from '../../../mongodb'; import { type CommandSucceededEvent, + GridFSBucket, MongoBulkWriteError, MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, - now + now, + ObjectId, + promiseWithResolvers } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -398,10 +403,42 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('6. GridFS - Upload', () => { + context('6. 
GridFS - Upload', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + if (client) { + await client.close(); + } + }); /** Tests in this section MUST only be run against server versions 4.4 and higher. */ - context('uploads via openUploadStream can be timed out', () => { + it('uploads via openUploadStream can be timed out', metadata, async function () { /** * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. * 1. Using `internalClient`, set the following fail point: @@ -424,9 +461,30 @@ describe('CSOT spec prose tests', function () { * 1. Call `uploadStream.close()` to flush the stream and insert chunks. * - Expect this to fail with a timeout error. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const bucket = new GridFSBucket(client.db('db')); + const stream = bucket.openUploadStream('filename'); + const data = Buffer.from('13', 'hex'); + + const fileStream = Readable.from(data); + const maybeError = await pipeline(fileStream, stream).then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); }); - context('Aborting an upload stream can be timed out', () => { + it('Aborting an upload stream can be timed out', metadata, async function () { /** * This test only applies to drivers that provide an API to abort a GridFS upload stream. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -450,10 +508,92 @@ describe('CSOT spec prose tests', function () { * 1. Call `uploadStream.abort()`. * - Expect this to fail with a timeout error. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['delete'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + await internalClient.db().admin().command(failpoint); + const bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 300 }); + + const data = Buffer.from('01020304', 'hex'); + + const { promise: writePromise, resolve, reject } = promiseWithResolvers(); + uploadStream.on('error', error => uploadStream.destroy(error)); + uploadStream.write(data, error => { + if (error) reject(error); + else resolve(); + }); + let maybeError = await writePromise.then( + () => null, + e => e + ); + expect(maybeError).to.be.null; + + maybeError = await uploadStream.abort().then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + uploadStream.destroy(); }); }); - context.skip('7. GridFS - Download', () => { + context('7. 
GridFS - Download', () => { + let internalClient: MongoClient; + let client: MongoClient; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + + await files.insertOne({ + _id: new ObjectId('000000000000000000000005'), + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + + if (client) { + await client.close(); + } + }); + /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -495,6 +635,27 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against `db.fs.chunks`. */ + it('download streams can be timed out', metadata, async function () { + const bucket = new GridFSBucket(client.db('db')); + const downloadStream = bucket.openDownloadStream(new ObjectId('000000000000000000000005')); + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); }); context('8. 
Server Selection', () => { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index c2e08cfc80a..49ddabc924b 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -8,11 +8,6 @@ const skippedSpecs = { 'change-streams': 'TODO(NODE-6035)', 'convenient-transactions': 'TODO(NODE-5687)', 'deprecated-options': 'TODO(NODE-5689)', - 'gridfs-advanced': 'TODO(NODE-6275)', - 'gridfs-delete': 'TODO(NODE-6275)', - 'gridfs-download': 'TODO(NODE-6275)', - 'gridfs-find': 'TODO(NODE-6275)', - 'gridfs-upload': 'TODO(NODE-6275)', 'tailable-awaitData': 'TODO(NODE-6035)', 'tailable-non-awaitData': 'TODO(NODE-6035)' }; diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 56127cc8ace..b2011ee2e73 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,4 +1,7 @@ /* Anything javascript specific relating to timeouts */ +import { once } from 'node:events'; +import { Readable } from 'node:stream'; +import { pipeline } from 'node:stream/promises'; import { setTimeout } from 'node:timers/promises'; import { expect } from 'chai'; @@ -15,11 +18,13 @@ import { Connection, type Db, type FindCursor, + GridFSBucket, LEGACY_HELLO_COMMAND, type MongoClient, MongoInvalidArgumentError, MongoOperationTimeoutError, - MongoServerError + MongoServerError, + ObjectId } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -576,6 +581,166 @@ describe('CSOT driver tests', metadata, () => { }); }); + describe('GridFSBucket', () => { + const blockTimeMS = 200; + let internalClient: MongoClient; + let client: MongoClient; + let bucket: GridFSBucket; + + beforeEach(async function () { + client = this.configuration.newClient(undefined, { timeoutMS: 1000 }); + internalClient = this.configuration.newClient(undefined); + }); + + afterEach(async function () { + await client.close(); + await internalClient.db().admin().command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + }); + + context('upload', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + }); + + describe('openUploadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 175 }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + + it('only emits index event once per bucket', metadata, async function () 
{ + let numEventsSeen = 0; + bucket.on('index', () => numEventsSeen++); + + const uploadStream0 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream0.destroy(error)); + const uploadStream1 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream1.destroy(error)); + + const data = Buffer.from('test', 'utf-8'); + await pipeline(Readable.from(data), uploadStream0); + await pipeline(Readable.from(data), uploadStream1); + + expect(numEventsSeen).to.equal(1); + }); + }); + + describe('openUploadStreamWithId', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStreamWithId(new ObjectId(), 'filename', { + timeoutMS: 175 + }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + + context('download', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS + } + }; + const _id = new ObjectId('000000000000000000000005'); + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + await files.insertOne({ + _id, + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(db); + }); + + describe('openDownloadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStream(_id, { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('openDownloadStreamByName', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStreamByName('length-10', { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + }); + describe('when using an explicit session', () => { const metadata: MongoDBMetadataUI = { requires: { topology: ['replicaset'], mongodb: '>=4.4' } diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 31414fa4664..a9f79842c31 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -11,6 +11,7 @@ import { CommandStartedEvent, Db, type Document, + GridFSBucket, type MongoClient, MongoError, ReadConcern, @@ -311,7 +312,7 @@ operations.set('dropCollection', async ({ entities, operation }) => { operations.set('drop', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.drop(); + return bucket.drop(operation.arguments); }); 
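// Rough usage sketch of the per-call GridFS timeoutMS options added in this patch
// (GridFSBucket.delete / GridFSBucket.drop). Assumptions: a reachable deployment at
// `uri`, a bucket named 'fs' in database 'db', and an existing file id; the timeout
// values are illustrative, not prescriptive.
import { GridFSBucket, MongoClient, MongoOperationTimeoutError, ObjectId } from 'mongodb';

async function cleanUpBucket(uri: string, fileId: ObjectId): Promise<void> {
  const client = new MongoClient(uri, { timeoutMS: 1000 });
  try {
    const bucket = new GridFSBucket(client.db('db'), { bucketName: 'fs' });

    // A per-call timeoutMS overrides the inherited client/db value; the remaining
    // budget is shared across the underlying files and chunks collection commands.
    await bucket.delete(fileId, { timeoutMS: 500 });
    await bucket.drop({ timeoutMS: 500 });
  } catch (error) {
    if (error instanceof MongoOperationTimeoutError) {
      // The shared deadline expired before both collections were processed.
      console.warn(`GridFS cleanup timed out: ${error.message}`);
    }
    throw error;
  } finally {
    await client.close();
  }
}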
operations.set('dropIndexes', async ({ entities, operation }) => { @@ -529,7 +530,8 @@ operations.set('targetedFailPoint', async ({ entities, operation }) => { operations.set('delete', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.delete(operation.arguments!.id); + const { id, ...opts } = operation.arguments; + return bucket.delete(id, opts); }); operations.set('download', async ({ entities, operation }) => { @@ -537,7 +539,8 @@ operations.set('download', async ({ entities, operation }) => { const { id, ...options } = operation.arguments ?? {}; const stream = bucket.openDownloadStream(id, options); - return Buffer.concat(await stream.toArray()); + const data = Buffer.concat(await stream.toArray()); + return data; }); operations.set('downloadByName', async ({ entities, operation }) => { @@ -552,7 +555,6 @@ operations.set('downloadByName', async ({ entities, operation }) => { operations.set('upload', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); const { filename, source, ...options } = operation.arguments ?? {}; - const stream = bucket.openUploadStream(filename, options); const fileStream = Readable.from(Buffer.from(source.$$hexBytes, 'hex')); @@ -832,9 +834,30 @@ operations.set('updateOne', async ({ entities, operation }) => { }); operations.set('rename', async ({ entities, operation }) => { - const collection = entities.getEntity('collection', operation.object); - const { to, ...options } = operation.arguments!; - return collection.rename(to, options); + let entity: GridFSBucket | Collection | undefined; + try { + entity = entities.getEntity('collection', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof Collection) { + const { to, ...options } = operation.arguments!; + return entity.rename(to, options); + } + + try { + entity = entities.getEntity('bucket', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof GridFSBucket) { + const { id, newFilename, ...opts } = operation.arguments!; + return entity.rename(id, newFilename, opts as any); + } + + expect.fail(`No collection or bucket with name '${operation.object}' found`); }); operations.set('createDataKey', async ({ entities, operation }) => { From c148f6b8ccec5e84e57553c64ad6b59b73044084 Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Fri, 4 Oct 2024 15:16:50 -0600 Subject: [PATCH 106/136] refactor(NODE-6411): AbstractCursor accepts an external timeout context (#4264) --- src/cmap/connection.ts | 6 +- src/cursor/abstract_cursor.ts | 134 ++++++++++++++---- src/index.ts | 1 + src/operations/find.ts | 5 +- src/timeout.ts | 17 +++ .../node_csot.test.ts | 18 ++- .../crud/find_cursor_methods.test.js | 52 +++++-- .../node-specific/abstract_cursor.test.ts | 117 ++++++++++++++- ...er_selection.prose.operation_count.test.ts | 23 +-- test/tools/utils.ts | 32 ++++- 10 files changed, 330 insertions(+), 75 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 507615e9f03..a43d6106c7b 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -422,9 +422,9 @@ export class Connection extends TypedEventEmitter { ...options }; - if (!options.omitMaxTimeMS && options.timeoutContext?.csotEnabled()) { - const { maxTimeMS } = options.timeoutContext; - if (maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + if (!options.omitMaxTimeMS) { + const maxTimeMS = options.timeoutContext?.maxTimeMS; 
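      // maxTimeMS comes from the CSOT timeout context when one is in use; legacy
      // (non-CSOT) contexts report null, so the guard below leaves the command untouched.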
+ if (maxTimeMS && maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; } const message = this.supportsOpMsg diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index d0f386923ad..f7e488d24b2 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,7 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from '../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; -import { TimeoutContext } from '../timeout'; +import { type CSOTTimeoutContext, type Timeout, TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -119,6 +119,14 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { timeoutMS?: number; /** @internal TODO(NODE-5688): make this public */ timeoutMode?: CursorTimeoutMode; + + /** + * @internal + * + * A timeout context to govern the total time the cursor can live. If provided, the cursor + * cannot be used in ITERATION mode. + */ + timeoutContext?: CursorTimeoutContext; } /** @internal */ @@ -171,7 +179,7 @@ export abstract class AbstractCursor< /** @internal */ protected readonly cursorOptions: InternalAbstractCursorOptions; /** @internal */ - protected timeoutContext?: TimeoutContext; + protected timeoutContext?: CursorTimeoutContext; /** @event */ static readonly CLOSE = 'close' as const; @@ -205,20 +213,12 @@ export abstract class AbstractCursor< }; this.cursorOptions.timeoutMS = options.timeoutMS; if (this.cursorOptions.timeoutMS != null) { - if (options.timeoutMode == null) { - if (options.tailable) { - this.cursorOptions.timeoutMode = CursorTimeoutMode.ITERATION; - } else { - this.cursorOptions.timeoutMode = CursorTimeoutMode.LIFETIME; - } - } else { - if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { - throw new MongoInvalidArgumentError( - "Cannot set tailable cursor's timeoutMode to LIFETIME" - ); - } - this.cursorOptions.timeoutMode = options.timeoutMode; + if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { + throw new MongoInvalidArgumentError("Cannot set tailable cursor's timeoutMode to LIFETIME"); } + this.cursorOptions.timeoutMode = + options.timeoutMode ?? + (options.tailable ? CursorTimeoutMode.ITERATION : CursorTimeoutMode.LIFETIME); } else { if (options.timeoutMode != null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); @@ -264,6 +264,17 @@ export abstract class AbstractCursor< utf8: options?.enableUtf8Validation === false ? false : true } }; + + if ( + options.timeoutContext != null && + options.timeoutMS != null && + this.cursorOptions.timeoutMode !== CursorTimeoutMode.LIFETIME + ) { + throw new MongoAPIError( + `cannot create a cursor with an externally provided timeout context that doesn't use timeoutMode=CURSOR_LIFETIME.` + ); + } + this.timeoutContext = options.timeoutContext; } /** @@ -721,6 +732,9 @@ export abstract class AbstractCursor< * if the resultant data has already been retrieved by this cursor. 
*/ rewind(): void { + if (this.timeoutContext && this.timeoutContext.owner !== this) { + throw new MongoAPIError(`Cannot rewind cursor that does not own its timeout context.`); + } if (!this.initialized) { return; } @@ -790,10 +804,13 @@ export abstract class AbstractCursor< */ private async cursorInit(): Promise { if (this.cursorOptions.timeoutMS != null) { - this.timeoutContext = TimeoutContext.create({ - serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, - timeoutMS: this.cursorOptions.timeoutMS - }); + this.timeoutContext ??= new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }), + this + ); } try { const state = await this._initialize(this.cursorSession); @@ -872,6 +889,20 @@ export abstract class AbstractCursor< private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; + const timeoutContextForKillCursors = (): CursorTimeoutContext | undefined => { + if (timeoutMS != null) { + this.timeoutContext?.clear(); + return new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, + timeoutMS + }), + this + ); + } else { + return this.timeoutContext?.refreshed(); + } + }; try { if ( !this.isKilled && @@ -884,23 +915,13 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = Long.ZERO; - let timeoutContext: TimeoutContext | undefined; - if (timeoutMS != null) { - this.timeoutContext?.clear(); - timeoutContext = TimeoutContext.create({ - serverSelectionTimeoutMS: this.client.options.serverSelectionTimeoutMS, - timeoutMS - }); - } else { - this.timeoutContext?.refresh(); - timeoutContext = this.timeoutContext; - } + await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session }), - timeoutContext + timeoutContextForKillCursors() ); } } catch (error) { @@ -1042,3 +1063,54 @@ class ReadableCursorStream extends Readable { } configureResourceManagement(AbstractCursor.prototype); + +/** + * @internal + * The cursor timeout context is a wrapper around a timeout context + * that keeps track of the "owner" of the cursor. For timeout contexts + * instantiated inside a cursor, the owner will be the cursor. + * + * All timeout behavior is exactly the same as the wrapped timeout context's. 
+ */ +export class CursorTimeoutContext extends TimeoutContext { + constructor( + public timeoutContext: TimeoutContext, + public owner: symbol | AbstractCursor + ) { + super(); + } + override get serverSelectionTimeout(): Timeout | null { + return this.timeoutContext.serverSelectionTimeout; + } + override get connectionCheckoutTimeout(): Timeout | null { + return this.timeoutContext.connectionCheckoutTimeout; + } + override get clearServerSelectionTimeout(): boolean { + return this.timeoutContext.clearServerSelectionTimeout; + } + override get clearConnectionCheckoutTimeout(): boolean { + return this.timeoutContext.clearConnectionCheckoutTimeout; + } + override get timeoutForSocketWrite(): Timeout | null { + return this.timeoutContext.timeoutForSocketWrite; + } + override get timeoutForSocketRead(): Timeout | null { + return this.timeoutContext.timeoutForSocketRead; + } + override csotEnabled(): this is CSOTTimeoutContext { + return this.timeoutContext.csotEnabled(); + } + override refresh(): void { + return this.timeoutContext.refresh(); + } + override clear(): void { + return this.timeoutContext.clear(); + } + override get maxTimeMS(): number | null { + return this.timeoutContext.maxTimeMS; + } + + override refreshed(): CursorTimeoutContext { + return new CursorTimeoutContext(this.timeoutContext.refreshed(), this.owner); + } +} diff --git a/src/index.ts b/src/index.ts index e555d97e9ed..a49dc015526 100644 --- a/src/index.ts +++ b/src/index.ts @@ -359,6 +359,7 @@ export type { CursorStreamOptions } from './cursor/abstract_cursor'; export type { + CursorTimeoutContext, InitialCursorResponse, InternalAbstractCursorOptions } from './cursor/abstract_cursor'; diff --git a/src/operations/find.ts b/src/operations/find.ts index e50b2762449..10453d141da 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,6 +1,6 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; -import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { type ExplainOptions } from '../explain'; import { ReadConcern } from '../read_concern'; @@ -18,7 +18,8 @@ import { Aspect, defineAspects, type Hint } from './operation'; */ // eslint-disable-next-line @typescript-eslint/no-unused-vars export interface FindOptions - extends Omit { + extends Omit, + AbstractCursorOptions { /** Sets the limit of documents returned in the query. */ limit?: number; /** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */ diff --git a/src/timeout.ts b/src/timeout.ts index f694b5f4f4f..9041ce4b88d 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -178,6 +178,8 @@ export abstract class TimeoutContext { else throw new MongoRuntimeError('Unrecognized options'); } + abstract get maxTimeMS(): number | null; + abstract get serverSelectionTimeout(): Timeout | null; abstract get connectionCheckoutTimeout(): Timeout | null; @@ -195,6 +197,9 @@ export abstract class TimeoutContext { abstract refresh(): void; abstract clear(): void; + + /** Returns a new instance of the TimeoutContext, with all timeouts refreshed and restarted. */ + abstract refreshed(): TimeoutContext; } /** @internal */ @@ -317,6 +322,10 @@ export class CSOTTimeoutContext extends TimeoutContext { throw new MongoOperationTimeoutError(message ?? 
`Expired after ${this.timeoutMS}ms`); return remainingTimeMS; } + + override refreshed(): CSOTTimeoutContext { + return new CSOTTimeoutContext(this); + } } /** @internal */ @@ -363,4 +372,12 @@ export class LegacyTimeoutContext extends TimeoutContext { clear(): void { return; } + + get maxTimeMS() { + return null; + } + + override refreshed(): LegacyTimeoutContext { + return new LegacyTimeoutContext(this.options); + } } diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b2011ee2e73..f4cfc7d882c 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -26,7 +26,7 @@ import { MongoServerError, ObjectId } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { type FailPoint, waitUntilPoolsFilled } from '../../tools/utils'; const metadata = { requires: { mongodb: '>=4.4' } }; @@ -362,7 +362,7 @@ describe('CSOT driver tests', metadata, () => { }; beforeEach(async function () { - internalClient = this.configuration.newClient(); + internalClient = this.configuration.newClient({}); await internalClient .db('db') .dropCollection('coll') @@ -378,7 +378,11 @@ describe('CSOT driver tests', metadata, () => { await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { monitorCommands: true }); + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize: 10 }); + + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + commandStarted = []; commandSucceeded = []; client.on('commandStarted', ev => commandStarted.push(ev)); @@ -492,7 +496,13 @@ describe('CSOT driver tests', metadata, () => { await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { monitorCommands: true }); + client = this.configuration.newClient(undefined, { + monitorCommands: true, + minPoolSize: 10 + }); + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + commandStarted = []; commandSucceeded = []; client.on('commandStarted', ev => commandStarted.push(ev)); diff --git a/test/integration/crud/find_cursor_methods.test.js b/test/integration/crud/find_cursor_methods.test.js index 42eeda3e816..21a6649bf0b 100644 --- a/test/integration/crud/find_cursor_methods.test.js +++ b/test/integration/crud/find_cursor_methods.test.js @@ -1,7 +1,13 @@ 'use strict'; const { expect } = require('chai'); const { filterForCommands } = require('../shared'); -const { promiseWithResolvers, MongoCursorExhaustedError } = require('../../mongodb'); +const { + promiseWithResolvers, + MongoCursorExhaustedError, + CursorTimeoutContext, + TimeoutContext, + MongoAPIError +} = require('../../mongodb'); describe('Find Cursor', function () { let client; @@ -246,23 +252,45 @@ describe('Find Cursor', function () { }); context('#rewind', function () { - it('should rewind a cursor', function (done) { + it('should rewind a cursor', async function () { const coll = client.db().collection('abstract_cursor'); const cursor = coll.find({}); - this.defer(() => cursor.close()); - cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + try { + let docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); cursor.rewind(); - 
cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); + } finally { + await cursor.close(); + } + }); - done(); - }); - }); + it('throws if the cursor does not own its timeoutContext', async function () { + const coll = client.db().collection('abstract_cursor'); + const cursor = coll.find( + {}, + { + timeoutContext: new CursorTimeoutContext( + TimeoutContext.create({ + timeoutMS: 1000, + serverSelectionTimeoutMS: 1000 + }), + Symbol() + ) + } + ); + + try { + cursor.rewind(); + expect.fail(`rewind should have thrown.`); + } catch (error) { + expect(error).to.be.instanceOf(MongoAPIError); + } finally { + await cursor.close(); + } }); it('should end an implicit session on rewind', { diff --git a/test/integration/node-specific/abstract_cursor.test.ts b/test/integration/node-specific/abstract_cursor.test.ts index a5e7fba13dd..136e72a3499 100644 --- a/test/integration/node-specific/abstract_cursor.test.ts +++ b/test/integration/node-specific/abstract_cursor.test.ts @@ -7,12 +7,17 @@ import { inspect } from 'util'; import { AbstractCursor, type Collection, + CursorTimeoutContext, + CursorTimeoutMode, type FindCursor, MongoAPIError, type MongoClient, MongoCursorExhaustedError, - MongoServerError + MongoOperationTimeoutError, + MongoServerError, + TimeoutContext } from '../../mongodb'; +import { type FailPoint } from '../../tools/utils'; describe('class AbstractCursor', function () { describe('regression tests NODE-5372', function () { @@ -395,4 +400,114 @@ describe('class AbstractCursor', function () { expect(nextSpy.callCount).to.be.lessThan(numDocuments); }); }); + + describe('externally provided timeout contexts', function () { + let client: MongoClient; + let collection: Collection; + let context: CursorTimeoutContext; + + beforeEach(async function () { + client = this.configuration.newClient(); + + collection = client.db('abstract_cursor_integration').collection('test'); + + context = new CursorTimeoutContext( + TimeoutContext.create({ timeoutMS: 1000, serverSelectionTimeoutMS: 2000 }), + Symbol() + ); + + await collection.insertMany([{ a: 1 }, { b: 2 }, { c: 3 }]); + }); + + afterEach(async function () { + await collection.deleteMany({}); + await client.close(); + }); + + describe('when timeoutMode != LIFETIME', function () { + it('an error is thrown', function () { + expect(() => + collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.ITERATION } + ) + ).to.throw( + `cannot create a cursor with an externally provided timeout context that doesn't use timeoutMode=CURSOR_LIFETIME` + ); + }); + }); + + describe('when timeoutMode is omitted', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find({}, { timeoutContext: context, timeoutMS: 1000 }); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when timeoutMode is LIFETIME', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + // @ts-expect-error Private access. 
+ expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor is initialized', function () { + it('the provided timeoutContext is not overwritten', async function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + await cursor.toArray(); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor refreshes the timeout for killCursors', function () { + it( + 'the provided timeoutContext is not modified', + { + requires: { + mongodb: '>=4.4' + } + }, + async function () { + await client.db('admin').command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 5000 + } + } as FailPoint); + + const cursor = collection.find( + {}, + { + timeoutContext: context, + timeoutMS: 1000, + timeoutMode: CursorTimeoutMode.LIFETIME, + batchSize: 1 + } + ); + + const error = await cursor.toArray().catch(e => e); + + expect(error).to.be.instanceof(MongoOperationTimeoutError); + // @ts-expect-error We know we have a CSOT timeout context but TS does not. + expect(context.timeoutContext.remainingTimeMS).to.be.lessThan(0); + } + ); + }); + }); }); diff --git a/test/integration/server-selection/server_selection.prose.operation_count.test.ts b/test/integration/server-selection/server_selection.prose.operation_count.test.ts index fec6d24e61c..b4a7d9bf47b 100644 --- a/test/integration/server-selection/server_selection.prose.operation_count.test.ts +++ b/test/integration/server-selection/server_selection.prose.operation_count.test.ts @@ -1,5 +1,4 @@ import { expect } from 'chai'; -import { on } from 'events'; import { type Collection, @@ -7,7 +6,7 @@ import { HostAddress, type MongoClient } from '../../mongodb'; -import { sleep } from '../../tools/utils'; +import { waitUntilPoolsFilled } from '../../tools/utils'; const failPoint = { configureFailPoint: 'failCommand', @@ -28,17 +27,6 @@ async function runTaskGroup(collection: Collection, count: 10 | 100 | 1000) { } } -async function ensurePoolIsFull(client: MongoClient): Promise { - let connectionCount = 0; - - for await (const _event of on(client, 'connectionCreated')) { - connectionCount++; - if (connectionCount === POOL_SIZE * 2) { - break; - } - } -} - // Step 1: Configure a sharded cluster with two mongoses. Use a 4.2.9 or newer server version. 
const TEST_METADATA: MongoDBMetadataUI = { requires: { mongodb: '>=4.2.9', topology: 'sharded' } }; @@ -75,15 +63,8 @@ describe('operationCount-based Selection Within Latency Window - Prose Test', fu client.on('commandStarted', updateCount); - const poolIsFullPromise = ensurePoolIsFull(client); - - await client.connect(); - // Step 4: Using CMAP events, ensure the client's connection pools for both mongoses have been saturated - const poolIsFull = Promise.race([poolIsFullPromise, sleep(30 * 1000)]); - if (!poolIsFull) { - throw new Error('Timed out waiting for connection pool to fill to minPoolSize'); - } + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), POOL_SIZE * 2); seeds = client.topology.s.seedlist.map(address => address.toString()); diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 3cb50d2cd51..8614bd7d64c 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -1,5 +1,5 @@ import * as child_process from 'node:child_process'; -import { once } from 'node:events'; +import { on, once } from 'node:events'; import * as fs from 'node:fs/promises'; import * as path from 'node:path'; @@ -568,3 +568,33 @@ export async function itInNodeProcess( } }); } + +/** + * Connects the client and waits until `client` has emitted `count` connectionCreated events. + * + * **This will hang if the client does not have a maxPoolSizeSet!** + * + * This is useful when you want to ensure that the client has pools that are full of connections. + * + * This does not guarantee that all pools that the client has are completely full unless + * count = number of servers to which the client is connected * maxPoolSize. But it can + * serve as a way to ensure that some connections have been established and are in the pools. + */ +export async function waitUntilPoolsFilled( + client: MongoClient, + signal: AbortSignal, + count: number = client.s.options.maxPoolSize +): Promise { + let connectionCount = 0; + + async function wait$() { + for await (const _event of on(client, 'connectionCreated', { signal })) { + connectionCount++; + if (connectionCount >= count) { + break; + } + } + } + + await Promise.all([wait$(), client.connect()]); +} From 4488bab2c91bfd36208014841636804878c48315 Mon Sep 17 00:00:00 2001 From: Warren James Date: Mon, 7 Oct 2024 13:07:46 -0400 Subject: [PATCH 107/136] feat(NODE-6305): Add CSOT support to tailable cursors (#4218) Co-authored-by: Neal Beeken --- src/cursor/abstract_cursor.ts | 51 +++- src/cursor/run_command_cursor.ts | 2 + src/mongo_client.ts | 5 + src/operations/create_collection.ts | 1 + test/benchmarks/driverBench/common.js | 4 +- ...ient_side_operations_timeout.prose.test.ts | 40 ++-- ...lient_side_operations_timeout.spec.test.ts | 7 +- .../node_csot.test.ts | 221 +++++++++++++++++- .../tailable-awaitData.json | 146 ++++++++++++ .../tailable-non-awaitData.json | 151 ++++++++++++ test/tools/unified-spec-runner/operations.ts | 45 +++- 11 files changed, 641 insertions(+), 32 deletions(-) create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json create mode 100644 test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index f7e488d24b2..255a977a5f9 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -209,12 +209,35 @@ export abstract class AbstractCursor< options.readPreference && options.readPreference instanceof ReadPreference ? 
options.readPreference : ReadPreference.primary, - ...pluckBSONSerializeOptions(options) + ...pluckBSONSerializeOptions(options), + timeoutMS: options.timeoutMS, + tailable: options.tailable, + awaitData: options.awaitData }; - this.cursorOptions.timeoutMS = options.timeoutMS; if (this.cursorOptions.timeoutMS != null) { - if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { - throw new MongoInvalidArgumentError("Cannot set tailable cursor's timeoutMode to LIFETIME"); + if (options.timeoutMode == null) { + if (options.tailable) { + this.cursorOptions.timeoutMode = CursorTimeoutMode.ITERATION; + + if (options.awaitData) { + if ( + options.maxAwaitTimeMS != null && + options.maxAwaitTimeMS >= this.cursorOptions.timeoutMS + ) + throw new MongoInvalidArgumentError( + 'Cannot specify maxAwaitTimeMS >= timeoutMS for a tailable awaitData cursor' + ); + } + } else { + this.cursorOptions.timeoutMode = CursorTimeoutMode.LIFETIME; + } + } else { + if (options.tailable && this.cursorOptions.timeoutMode === CursorTimeoutMode.LIFETIME) { + throw new MongoInvalidArgumentError( + "Cannot set tailable cursor's timeoutMode to LIFETIME" + ); + } + this.cursorOptions.timeoutMode = options.timeoutMode; } this.cursorOptions.timeoutMode = options.timeoutMode ?? @@ -223,6 +246,8 @@ export abstract class AbstractCursor< if (options.timeoutMode != null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); } + + // Set for initial command this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null && ((this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && @@ -781,15 +806,17 @@ export abstract class AbstractCursor< 'Unexpected null selectedServer. A cursor creating command should have set this' ); } + const getMoreOptions = { + ...this.cursorOptions, + session: this.cursorSession, + batchSize + }; + const getMoreOperation = new GetMoreOperation( this.cursorNamespace, this.cursorId, this.selectedServer, - { - ...this.cursorOptions, - session: this.cursorSession, - batchSize - } + getMoreOptions ); return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); @@ -814,6 +841,8 @@ export abstract class AbstractCursor< } try { const state = await this._initialize(this.cursorSession); + // Set omitMaxTimeMS to the value needed for subsequent getMore calls + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; const response = state.response; this.selectedServer = state.server; this.cursorId = response.id; @@ -866,9 +895,9 @@ export abstract class AbstractCursor< } catch (error) { try { await this.cleanup(undefined, error); - } catch (error) { + } catch (cleanupError) { // `cleanupCursor` should never throw, squash and throw the original error - squashError(error); + squashError(cleanupError); } throw error; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 6b31ce2263a..90e4a94fd42 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -23,6 +23,8 @@ export type RunCursorCommandOptions = { timeoutMS?: number; /** @internal */ timeoutMode?: CursorTimeoutMode; + tailable?: boolean; + awaitData?: boolean; } & BSONSerializeOptions; /** @public */ diff --git a/src/mongo_client.ts b/src/mongo_client.ts index 49201910362..cb66fb0bfd2 100644 --- a/src/mongo_client.ts +++ b/src/mongo_client.ts @@ -483,6 +483,11 @@ export class MongoClient extends TypedEventEmitter implements return this.s.bsonOptions; } + /** @internal */ 
+ get timeoutMS(): number | undefined { + return this.options.timeoutMS; + } + /** * Executes a client bulk write operation, available on server 8.0+. * @param models - The client bulk write models. diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index afb2680b9a0..293ecc8be52 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -17,6 +17,7 @@ import { Aspect, defineAspects } from './operation'; const ILLEGAL_COMMAND_FIELDS = new Set([ 'w', 'wtimeout', + 'timeoutMS', 'j', 'fsync', 'autoIndexId', diff --git a/test/benchmarks/driverBench/common.js b/test/benchmarks/driverBench/common.js index bb5b48babfd..3ffd309572a 100644 --- a/test/benchmarks/driverBench/common.js +++ b/test/benchmarks/driverBench/common.js @@ -24,7 +24,9 @@ function loadSpecString(filePath) { } function makeClient() { - this.client = new MongoClient(process.env.MONGODB_URI || 'mongodb://127.0.0.1:27017'); + this.client = new MongoClient(process.env.MONGODB_URI || 'mongodb://127.0.0.1:27017', { + timeoutMS: 0 + }); } function connectClient() { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1b8c34633b4..09b95d6dff0 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -77,7 +77,7 @@ describe('CSOT spec prose tests', function () { beforeEach(async function () { await internalClient .db('db') - .collection('coll') + .collection('bulkWriteTest') .drop() .catch(() => null); await internalClient.db('admin').command(failpoint); @@ -93,7 +93,7 @@ describe('CSOT spec prose tests', function () { const oneMBDocs = Array.from({ length: 50 }, (_, _id) => ({ _id, a })); const error = await client .db('db') - .collection<{ _id: number; a: Uint8Array }>('coll') + .collection<{ _id: number; a: Uint8Array }>('bulkWriteTest') .insertMany(oneMBDocs) .catch(error => error); @@ -265,6 +265,7 @@ describe('CSOT spec prose tests', function () { }); context('5. 
Blocking Iteration Methods', () => { + const metadata = { requires: { mongodb: '>=4.4' } }; /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an @@ -276,7 +277,7 @@ describe('CSOT spec prose tests', function () { data: { failCommands: ['getMore'], blockConnection: true, - blockTimeMS: 20 + blockTimeMS: 90 } }; let internalClient: MongoClient; @@ -286,7 +287,11 @@ describe('CSOT spec prose tests', function () { beforeEach(async function () { internalClient = this.configuration.newClient(); - await internalClient.db('db').dropCollection('coll'); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); // Creating capped collection to be able to create tailable find cursor const coll = await internalClient .db('db') @@ -294,7 +299,13 @@ describe('CSOT spec prose tests', function () { await coll.insertOne({ x: 1 }); await internalClient.db().admin().command(failpoint); - client = this.configuration.newClient(undefined, { timeoutMS: 20, monitorCommands: true }); + client = this.configuration.newClient(undefined, { + monitorCommands: true, + timeoutMS: 100, + minPoolSize: 20 + }); + await client.connect(); + commandStarted = []; commandSucceeded = []; @@ -337,11 +348,11 @@ describe('CSOT spec prose tests', function () { * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ - it.skip('send correct number of finds and getMores', async function () { + it('send correct number of finds and getMores', metadata, async function () { const cursor = client .db('db') .collection('coll') - .find({}, { tailable: true, awaitData: true }) + .find({}, { tailable: true }) .project({ _id: 0 }); const doc = await cursor.next(); expect(doc).to.deep.equal({ x: 1 }); @@ -358,7 +369,7 @@ describe('CSOT spec prose tests', function () { expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); // Expect 2 getMore expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); - }).skipReason = 'TODO(NODE-6305)'; + }); }); context('Change Streams', () => { @@ -383,8 +394,11 @@ describe('CSOT spec prose tests', function () { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ - it.skip('sends correct number of aggregate and getMores', async function () { - const changeStream = client.db('db').collection('coll').watch(); + it.skip('sends correct number of aggregate and getMores', metadata, async function () { + const changeStream = client + .db('db') + .collection('coll') + .watch([], { timeoutMS: 20, maxAwaitTimeMS: 19 }); const maybeError = await changeStream.next().then( () => null, e => e @@ -397,9 +411,9 @@ describe('CSOT spec prose tests', function () { const getMores = commandStarted.filter(e => e.command.getMore != null).map(e => e.command); // Expect 1 aggregate expect(aggregates).to.have.lengthOf(1); - // Expect 1 getMore - expect(getMores).to.have.lengthOf(1); - }).skipReason = 'TODO(NODE-6305)'; + // Expect 2 getMores + expect(getMores).to.have.lengthOf(2); + }).skipReason = 'TODO(NODE-6387)'; }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 49ddabc924b..d72e9bc5ebe 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -25,7 +25,12 @@ const skippedTests = { 'Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset': 'TODO(DRIVERS-2965)', 'maxTimeMS value in the command is less than timeoutMS': - 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs' + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': + 'TODO(DRIVERS-2965)', + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(DRIVERS-2965)', + 'timeoutMS is refreshed for getMore - failure': + 'TODO(DRIVERS-2965): see modified test in unified-csot-node-specs' // Skipping for both tailable awaitData and tailable non-awaitData cursors }; describe('CSOT spec tests', function () { diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index f4cfc7d882c..b1516454cc7 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -31,13 +31,18 @@ import { type FailPoint, waitUntilPoolsFilled } from '../../tools/utils'; const metadata = { requires: { mongodb: '>=4.4' } }; describe('CSOT driver tests', metadata, () => { + // NOTE: minPoolSize here is set to ensure that connections are available when testing timeout + // behaviour. 
This reduces flakiness in our tests since operations will not spend time + // establishing connections, more closely mirroring long-running application behaviour + const minPoolSize = 20; + describe('timeoutMS inheritance', () => { let client: MongoClient; let db: Db; let coll: Collection; beforeEach(async function () { - client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + client = this.configuration.newClient(undefined, { timeoutMS: 100, minPoolSize }); db = client.db('test', { timeoutMS: 200 }); }); @@ -159,7 +164,10 @@ describe('CSOT driver tests', metadata, () => { metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, test: async function () { const commandsStarted = []; - client = this.configuration.newClient(undefined, { timeoutMS: 1, monitorCommands: true }); + client = this.configuration.newClient(undefined, { + timeoutMS: 1, + monitorCommands: true + }); client.on('commandStarted', ev => commandsStarted.push(ev)); @@ -591,6 +599,211 @@ describe('CSOT driver tests', metadata, () => { }); }); + describe('Tailable cursors', function () { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['aggregate', 'find', 'getMore'], + blockConnection: true, + blockTimeMS: 100 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + + await internalClient.db('db').createCollection('coll', { capped: true, size: 1_000_000 }); + + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 100 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize }); + commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client.connect(); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('when in ITERATION mode', function () { + context('awaitData cursors', function () { + let cursor: FindCursor; + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, awaitData: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, tailable: true, awaitData: true, batchSize: 1 }); + for (let i = 0; i < 5; i++) { + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + } + }); + + it('does not use timeoutMS to compute 
maxTimeMS for getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 10_000, tailable: true, awaitData: true, batchSize: 1 }); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.not.haveOwnProperty('maxTimeMS'); + }); + + context('when maxAwaitTimeMS is specified', function () { + it( + 'sets maxTimeMS to the configured maxAwaitTimeMS value on getMores', + metadata, + async function () { + cursor = client.db('db').collection('coll').find( + {}, + { + timeoutMS: 10_000, + tailable: true, + awaitData: true, + batchSize: 1, + maxAwaitTimeMS: 100 + } + ); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.haveOwnProperty('maxTimeMS'); + expect(getMore.maxTimeMS).to.equal(100); + } + ); + }); + }); + + context('non-awaitData cursors', function () { + let cursor: FindCursor; + + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, tailable: true, batchSize: 1 }); + for (let i = 0; i < 5; i++) { + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + } + }); + + it('does not append a maxTimeMS field to original command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + + const finds = commandStarted.filter(x => x.command.find != null); + expect(finds).to.have.lengthOf(1); + expect(finds[0].command.find).to.exist; + expect(finds[0].command.maxTimeMS).to.not.exist; + }); + it('does not append a maxTimeMS field to subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted.filter(x => x.command.getMore != null); + + expect(getMores).to.have.lengthOf(1); + expect(getMores[0].command.getMore).to.exist; + expect(getMores[0].command.getMore.maxTimeMS).to.not.exist; + }); + }); + }); + }); + describe('GridFSBucket', () => { const blockTimeMS = 200; let internalClient: MongoClient; @@ -798,6 +1011,10 @@ describe('CSOT driver tests', metadata, () => { beforeEach(async function () { client = this.configuration.newClient({ timeoutMS: 123 }); + await client + .db('db') + .dropCollection('coll') + .catch(() => null); }); afterEach(async function () { diff 
--git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json new file mode 100644 index 00000000000..17da3e3c0c9 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json @@ -0,0 +1,146 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json new file mode 100644 index 00000000000..80cf74a1116 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json @@ -0,0 +1,151 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": 
"collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index a9f79842c31..f7c34a70239 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -268,7 +268,18 @@ operations.set('createCollection', async ({ entities, operation }) => { operations.set('createFindCursor', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.find(filter, opts); // The spec dictates that we create the cursor and force the find command // to execute, but don't move the cursor forward. 
hasNext() accomplishes @@ -332,7 +343,18 @@ operations.set('find', async ({ entities, operation }) => { } else { queryable = entities.getEntity('collection', operation.object); } - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } return queryable.find(filter, opts).toArray(); }); @@ -804,10 +826,25 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc operations.set('createCommandCursor', async ({ entities, operation }: OperationFunctionParams) => { const collection = entities.getEntity('db', operation.object); - const { command, ...opts } = operation.arguments!; + const { command, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + tailable: opts.tailable, + awaitData: opts.awaitData, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); From c28a365a57c90907e14b982c9b9892e6c5337c0e Mon Sep 17 00:00:00 2001 From: Aditi Khare <106987683+aditi-khare-mongoDB@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:37:08 -0400 Subject: [PATCH 108/136] feat(NODE-6389): add support for timeoutMS in StateMachine.execute() (#4243) Co-authored-by: Warren James Co-authored-by: Neal Beeken Co-authored-by: Bailey Pearson --- src/client-side-encryption/state_machine.ts | 88 +++++++---- src/sdam/server.ts | 4 + ...ient_side_operations_timeout.prose.test.ts | 87 +++++++++-- ...lient_side_operations_timeout.unit.test.ts | 104 +++++++++++-- .../state_machine.test.ts | 143 +++++++++++++++++- 5 files changed, 371 insertions(+), 55 deletions(-) diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index af3ea4c215d..f47ee191b54 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -12,7 +12,9 @@ import { } from '../bson'; import { type ProxyOptions } from '../cmap/connection'; import { getSocks, type SocksLib } from '../deps'; +import { MongoOperationTimeoutError } from '../error'; import { type MongoClient, type MongoClientOptions } from '../mongo_client'; +import { Timeout, type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, MongoDBCollectionNamespace, promiseWithResolvers } from '../utils'; import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; @@ -173,6 +175,7 @@ export type StateMachineOptions = { * An internal class that executes across a MongoCryptContext until either * a finishing state or an error is reached. Do not instantiate directly. 
*/ +// TODO(DRIVERS-2671): clarify CSOT behavior for FLE APIs export class StateMachine { constructor( private options: StateMachineOptions, @@ -182,7 +185,11 @@ export class StateMachine { /** * Executes the state machine according to the specification */ - async execute(executor: StateMachineExecutable, context: MongoCryptContext): Promise { + async execute( + executor: StateMachineExecutable, + context: MongoCryptContext, + timeoutContext?: TimeoutContext + ): Promise { const keyVaultNamespace = executor._keyVaultNamespace; const keyVaultClient = executor._keyVaultClient; const metaDataClient = executor._metaDataClient; @@ -201,8 +208,13 @@ export class StateMachine { 'unreachable state machine state: entered MONGOCRYPT_CTX_NEED_MONGO_COLLINFO but metadata client is undefined' ); } - const collInfo = await this.fetchCollectionInfo(metaDataClient, context.ns, filter); + const collInfo = await this.fetchCollectionInfo( + metaDataClient, + context.ns, + filter, + timeoutContext + ); if (collInfo) { context.addMongoOperationResponse(collInfo); } @@ -222,9 +234,9 @@ export class StateMachine { // When we are using the shared library, we don't have a mongocryptd manager. const markedCommand: Uint8Array = mongocryptdManager ? await mongocryptdManager.withRespawn( - this.markCommand.bind(this, mongocryptdClient, context.ns, command) + this.markCommand.bind(this, mongocryptdClient, context.ns, command, timeoutContext) ) - : await this.markCommand(mongocryptdClient, context.ns, command); + : await this.markCommand(mongocryptdClient, context.ns, command, timeoutContext); context.addMongoOperationResponse(markedCommand); context.finishMongoOperation(); @@ -233,7 +245,12 @@ export class StateMachine { case MONGOCRYPT_CTX_NEED_MONGO_KEYS: { const filter = context.nextMongoOperation(); - const keys = await this.fetchKeys(keyVaultClient, keyVaultNamespace, filter); + const keys = await this.fetchKeys( + keyVaultClient, + keyVaultNamespace, + filter, + timeoutContext + ); if (keys.length === 0) { // See docs on EMPTY_V @@ -255,9 +272,7 @@ export class StateMachine { } case MONGOCRYPT_CTX_NEED_KMS: { - const requests = Array.from(this.requests(context)); - await Promise.all(requests); - + await Promise.all(this.requests(context, timeoutContext)); context.finishKMSRequests(); break; } @@ -299,7 +314,7 @@ export class StateMachine { * @param kmsContext - A C++ KMS context returned from the bindings * @returns A promise that resolves when the KMS reply has be fully parsed */ - async kmsRequest(request: MongoCryptKMSRequest): Promise { + async kmsRequest(request: MongoCryptKMSRequest, timeoutContext?: TimeoutContext): Promise { const parsedUrl = request.endpoint.split(':'); const port = parsedUrl[1] != null ? 
Number.parseInt(parsedUrl[1], 10) : HTTPS_PORT; const socketOptions = autoSelectSocketOptions(this.options.socketOptions || {}); @@ -329,10 +344,6 @@ export class StateMachine { } } - function ontimeout() { - return new MongoCryptError('KMS request timed out'); - } - function onerror(cause: Error) { return new MongoCryptError('KMS request failed', { cause }); } @@ -364,7 +375,6 @@ export class StateMachine { resolve: resolveOnNetSocketConnect } = promiseWithResolvers(); netSocket - .once('timeout', () => rejectOnNetSocketError(ontimeout())) .once('error', err => rejectOnNetSocketError(onerror(err))) .once('close', () => rejectOnNetSocketError(onclose())) .once('connect', () => resolveOnNetSocketConnect()); @@ -410,8 +420,8 @@ export class StateMachine { reject: rejectOnTlsSocketError, resolve } = promiseWithResolvers(); + socket - .once('timeout', () => rejectOnTlsSocketError(ontimeout())) .once('error', err => rejectOnTlsSocketError(onerror(err))) .once('close', () => rejectOnTlsSocketError(onclose())) .on('data', data => { @@ -425,20 +435,26 @@ export class StateMachine { resolve(); } }); - await willResolveKmsRequest; + await (timeoutContext?.csotEnabled() + ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) + : willResolveKmsRequest); + } catch (error) { + if (error instanceof TimeoutError) + throw new MongoOperationTimeoutError('KMS request timed out'); + throw error; } finally { // There's no need for any more activity on this socket at this point. destroySockets(); } } - *requests(context: MongoCryptContext) { + *requests(context: MongoCryptContext, timeoutContext?: TimeoutContext) { for ( let request = context.nextKMSRequest(); request != null; request = context.nextKMSRequest() ) { - yield this.kmsRequest(request); + yield this.kmsRequest(request, timeoutContext); } } @@ -498,7 +514,8 @@ export class StateMachine { async fetchCollectionInfo( client: MongoClient, ns: string, - filter: Document + filter: Document, + timeoutContext?: TimeoutContext ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); @@ -506,7 +523,10 @@ export class StateMachine { .db(db) .listCollections(filter, { promoteLongs: false, - promoteValues: false + promoteValues: false, + ...(timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {}) }) .toArray(); @@ -522,12 +542,22 @@ export class StateMachine { * @param command - The command to execute. * @param callback - Invoked with the serialized and marked bson command, or with an error */ - async markCommand(client: MongoClient, ns: string, command: Uint8Array): Promise { - const options = { promoteLongs: false, promoteValues: false }; + async markCommand( + client: MongoClient, + ns: string, + command: Uint8Array, + timeoutContext?: TimeoutContext + ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - const rawCommand = deserialize(command, options); + const bsonOptions = { promoteLongs: false, promoteValues: false }; + const rawCommand = deserialize(command, bsonOptions); - const response = await client.db(db).command(rawCommand, options); + const response = await client.db(db).command(rawCommand, { + ...bsonOptions, + ...(timeoutContext?.csotEnabled() + ? 
{ timeoutMS: timeoutContext?.remainingTimeMS } + : undefined) + }); return serialize(response, this.bsonOptions); } @@ -543,7 +573,8 @@ export class StateMachine { fetchKeys( client: MongoClient, keyVaultNamespace: string, - filter: Uint8Array + filter: Uint8Array, + timeoutContext?: TimeoutContext ): Promise> { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); @@ -551,7 +582,12 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter)) + .find( + deserialize(filter), + timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS, timeoutMode: 'cursorLifetime' } + : {} + ) .toArray(); } } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 08325086d53..7ab2d9a043f 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -311,6 +311,10 @@ export class Server extends TypedEventEmitter { delete finalOptions.readPreference; } + if (this.description.iscryptd) { + finalOptions.omitMaxTimeMS = true; + } + const session = finalOptions.session; let conn = session?.pinnedConnection; diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 09b95d6dff0..80da92e10a3 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,5 +1,7 @@ /* Specification prose tests */ +import { type ChildProcess, spawn } from 'node:child_process'; + import { expect } from 'chai'; import * as semver from 'semver'; import * as sinon from 'sinon'; @@ -16,7 +18,8 @@ import { MongoServerSelectionError, now, ObjectId, - promiseWithResolvers + promiseWithResolvers, + squashError } from '../../mongodb'; import { type FailPoint } from '../../tools/utils'; @@ -103,17 +106,55 @@ describe('CSOT spec prose tests', function () { }); }); - context.skip('2. maxTimeMS is not set for commands sent to mongocryptd', () => { - /** - * This test MUST only be run against enterprise server versions 4.2 and higher. - * - * 1. Launch a mongocryptd process on 23000. - * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. - * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. - * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. - */ - }); + context( + '2. maxTimeMS is not set for commands sent to mongocryptd', + { requires: { mongodb: '>=4.2' } }, + () => { + /** + * This test MUST only be run against enterprise server versions 4.2 and higher. + * + * 1. Launch a mongocryptd process on 23000. + * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. + * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. + * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. 
+ */ + + let client: MongoClient; + const mongocryptdTestPort = '23000'; + let childProcess: ChildProcess; + + beforeEach(async function () { + childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { + stdio: 'ignore', + detached: true + }); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}/?timeoutMS=1000`, { + monitorCommands: true + }); + }); + + afterEach(async function () { + await client.close(); + childProcess.kill('SIGKILL'); + sinon.restore(); + }); + + it('maxTimeMS is not set', async function () { + const commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client + .db('admin') + .command({ ping: 1 }) + .catch(e => squashError(e)); + expect(commandStarted).to.have.lengthOf(1); + expect(commandStarted[0].command).to.not.have.property('maxTimeMS'); + }); + } + ); + // TODO(NODE-6391): Add timeoutMS support to Explicit Encryption context.skip('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, @@ -720,6 +761,30 @@ describe('CSOT spec prose tests', function () { 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. 
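// A minimal sketch of the omitMaxTimeMS idea the server.ts change above relies on:
// when a command is built for a mongocryptd server (description.iscryptd), any
// maxTimeMS derived from timeoutMS is stripped before the command goes on the wire,
// which is what the mongocryptd prose test asserts. The helper and type names below
// (applyOmitMaxTimeMS, SketchCommand) are assumptions for illustration only; the
// driver's real command-construction path is more involved.
interface SketchCommand {
  [key: string]: unknown;
  maxTimeMS?: number;
}

function applyOmitMaxTimeMS(command: SketchCommand, omitMaxTimeMS: boolean): SketchCommand {
  if (!omitMaxTimeMS) return command;
  // Return a copy without maxTimeMS rather than mutating the caller's command object.
  const { maxTimeMS: _dropped, ...rest } = command;
  return rest;
}

// Usage sketch: the client-side timeoutMS deadline still applies locally, but the
// ping sent to mongocryptd carries no maxTimeMS field.
const pingForMongocryptd = applyOmitMaxTimeMS({ ping: 1, maxTimeMS: 1000 }, true);
// pingForMongocryptd => { ping: 1 }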
diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 944d9b96048..7387099a7f1 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -6,8 +6,22 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; - -import { ConnectionPool, type MongoClient, Timeout, TimeoutContext, Topology } from '../../mongodb'; +import { setTimeout } from 'timers'; +import { TLSSocket } from 'tls'; +import { promisify } from 'util'; + +// eslint-disable-next-line @typescript-eslint/no-restricted-imports +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { + ConnectionPool, + CSOTTimeoutContext, + type MongoClient, + MongoOperationTimeoutError, + Timeout, + TimeoutContext, + Topology +} from '../../mongodb'; +import { createTimerSandbox } from '../../unit/timer_sandbox'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec unit tests', function () { @@ -93,17 +107,83 @@ describe('CSOT spec unit tests', function () { }).skipReason = 'TODO(NODE-5682): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; - context.skip('Client side encryption', function () { - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); + describe('Client side encryption', function () { + describe('KMS requests', function () { + const stateMachine = new StateMachine({} as any); + const request = { + addResponse: _response => {}, + status: { + type: 1, + code: 1, + message: 'notARealStatus' + }, + bytesNeeded: 500, + kmsProvider: 'notRealAgain', + endpoint: 'fake', + message: Buffer.from('foobar') + }; + + context('when StateMachine.kmsRequest() is passed a `CSOTimeoutContext`', function () { + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) {}); + }); + + afterEach(async function () { + sinon.restore(); + }); + + it('the kms request times out through remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + const err = await stateMachine.kmsRequest(request, timeoutContext).catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(err.errmsg).to.equal('KMS request timed out'); + }); + }); + + context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + + let sleep; + + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { + clock.tick(30000); + }); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + }); + + afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; + } + sinon.restore(); + }); + + it('the kms request does not timeout within 30 seconds', async function () { + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch(e => e); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); + }); 
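// A minimal sketch of the deadline pattern exercised by the kmsRequest unit tests
// above: the in-flight KMS socket work is raced against a promise that rejects once
// the remaining CSOT budget elapses, and that rejection is rethrown as an
// operation-timeout error; with no timeout context the work runs with no client-side
// deadline, mirroring the fake-timer test that sleeps for 30s without timing out.
// The names below (sketchKmsRequest, rejectAfter, KmsTimeoutError) are assumptions
// for illustration, not the driver's Timeout or TimeoutContext APIs.
class KmsTimeoutError extends Error {}

function rejectAfter(ms: number): Promise<never> {
  return new Promise((_resolve, reject) =>
    setTimeout(() => reject(new KmsTimeoutError(`timed out after ${ms}ms`)), ms)
  );
}

async function sketchKmsRequest(work: Promise<void>, remainingTimeMS?: number): Promise<void> {
  try {
    if (remainingTimeMS == null) {
      // No CSOT context: wait on the socket work indefinitely.
      return await work;
    }
    // CSOT context: whichever settles first wins; the rejecting timer enforces the budget.
    await Promise.race([work, rejectAfter(remainingTimeMS)]);
  } catch (error) {
    if (error instanceof KmsTimeoutError) throw new Error('KMS request timed out');
    throw error;
  }
}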
+ }); + }); - context( - 'The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - }).skipReason = 'TODO(NODE-5686): Add CSOT support to client side encryption'; + // TODO(NODE-6390): Add timeoutMS support to Auto Encryption + it.skip('The remaining timeoutMS value should apply to commands sent to mongocryptd as part of automatic encryption.', () => {}); + }); context.skip('Background Connection Pooling', function () { context( diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index 77f3cf3a824..95bb6056355 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts +++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -12,9 +12,17 @@ import * as tls from 'tls'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { Db } from '../../../src/db'; -// eslint-disable-next-line @typescript-eslint/no-restricted-imports -import { MongoClient } from '../../../src/mongo_client'; -import { Int32, Long, serialize } from '../../mongodb'; +import { + BSON, + Collection, + CSOTTimeoutContext, + Int32, + Long, + MongoClient, + serialize, + squashError +} from '../../mongodb'; +import { sleep } from '../../tools/utils'; describe('StateMachine', function () { class MockRequest implements MongoCryptKMSRequest { @@ -74,12 +82,10 @@ describe('StateMachine', function () { const options = { promoteLongs: false, promoteValues: false }; const serializedCommand = serialize(command); const stateMachine = new StateMachine({} as any); - // eslint-disable-next-line @typescript-eslint/no-empty-function - const callback = () => {}; context('when executing the command', function () { it('does not promote values', function () { - stateMachine.markCommand(clientStub, 'test.coll', serializedCommand, callback); + stateMachine.markCommand(clientStub, 'test.coll', serializedCommand); expect(runCommandStub.calledWith(command, options)).to.be.true; }); }); @@ -461,4 +467,129 @@ describe('StateMachine', function () { expect.fail('missed exception'); }); }); + + describe('CSOT', function () { + describe('#fetchKeys', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let findSpy; + + beforeEach(async function () { + findSpy = sinon.spy(Collection.prototype, 'find'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.fetchKeys() is passed a `CSOTimeoutContext`', function () { + it('collection.find runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + }); + + context('when StateMachine.fetchKeys() is not passed a `CSOTimeoutContext`', function () { + it('collection.find runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + 
expect(findSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#markCommand', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let dbCommandSpy; + + beforeEach(async function () { + dbCommandSpy = sinon.spy(Db.prototype, 'command'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.markCommand() is passed a `CSOTimeoutContext`', function () { + it('db.command runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + }); + + context('when StateMachine.markCommand() is not passed a `CSOTimeoutContext`', function () { + it('db.command runs with an undefined timeoutMS property', async function () { + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#fetchCollectionInfo', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let listCollectionsSpy; + + beforeEach(async function () { + listCollectionsSpy = sinon.spy(Db.prototype, 'listCollections'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context( + 'when StateMachine.fetchCollectionInfo() is passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + } + ); + + context( + 'when StateMachine.fetchCollectionInfo() is not passed a `CSOTimeoutContext`', + function () { + it('listCollections runs with an undefined timeoutMS property', async function () { + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(listCollectionsSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + } + ); + }); + }); }); From 85d39ecb47eb590a14e3187e5f1f53ed78c01655 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 11 Oct 2024 16:44:47 -0400 Subject: [PATCH 109/136] fix(NODE-6412): read stale response from previously timed out connection (#4273) --- src/cmap/connection.ts | 5 +- ...lient_side_operations_timeout.spec.test.ts | 6 +++ .../node_csot.test.ts | 46 +++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index a43d6106c7b..a58ef566b7c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -747,9 +747,12 @@ export class Connection extends TypedEventEmitter { } } 
catch (readError) { if (TimeoutError.is(readError)) { - throw new MongoOperationTimeoutError( + const error = new MongoOperationTimeoutError( `Timed out during socket read (${readError.duration}ms)` ); + this.dataEvents = null; + this.onError(error); + throw error; } throw readError; } finally { diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index d72e9bc5ebe..c519da8039f 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -48,6 +48,12 @@ describe('CSOT spec tests', function () { runUnifiedSuite(specs, (test, configuration) => { const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + configuration.topologyType === 'LoadBalanced' && + test.description === 'timeoutMS is refreshed for close' + ) { + return 'LoadBalanced cannot refresh timeoutMS and run expected killCursors because pinned connection has been closed by the timeout'; + } if ( sessionCSOTTests.includes(test.description) && configuration.topologyType === 'ReplicaSetWithPrimary' && diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b1516454cc7..68d7b16f54d 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1116,4 +1116,50 @@ describe('CSOT driver tests', metadata, () => { ); }); }); + + describe('Connection after timeout', { requires: { mongodb: '>=4.4' } }, function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500 }); + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 700 + } + }; + + await client.db('admin').command(failpoint); + }); + + afterEach(async function () { + await client.close(); + }); + + it('closes so pending messages are not read by another operation', async function () { + const cmap = []; + client.on('connectionCheckedOut', ev => cmap.push(ev)); + client.on('connectionClosed', ev => cmap.push(ev)); + + const error = await client + .db('socket') + .collection('closes') + .insertOne({}) + .catch(error => error); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(cmap).to.have.lengthOf(2); + + const [checkedOut, closed] = cmap; + expect(checkedOut).to.have.property('name', 'connectionCheckedOut'); + expect(closed).to.have.property('name', 'connectionClosed'); + expect(checkedOut).to.have.property('connectionId', closed.connectionId); + }); + }); }); From 450b163046604757f281f2324eaf84ccade93918 Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Mon, 14 Oct 2024 09:38:50 -0600 Subject: [PATCH 110/136] feat(NODE-6403): add CSOT support to client bulk write (#4261) Co-authored-by: Warren James --- src/cmap/connection.ts | 2 + src/cmap/wire_protocol/on_data.ts | 1 + src/cursor/abstract_cursor.ts | 2 +- src/cursor/client_bulk_write_cursor.ts | 8 +- src/operations/client_bulk_write/executor.ts | 16 +- src/sdam/server.ts | 2 +- src/utils.ts | 13 + ...ient_side_operations_timeout.prose.test.ts | 29 +- .../node_csot.test.ts | 16 +- .../collection_db_management.test.ts | 4 +- 
.../crud/client_bulk_write.test.ts | 384 ++++++++++++++++++ test/tools/runner/config.ts | 28 +- test/tools/utils.ts | 67 +++ 13 files changed, 535 insertions(+), 37 deletions(-) create mode 100644 test/integration/crud/client_bulk_write.test.ts diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index a58ef566b7c..2e2900e40ae 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -716,6 +716,8 @@ export class Connection extends TypedEventEmitter { throw new MongoOperationTimeoutError('Timed out at socket write'); } throw error; + } finally { + timeout.clear(); } } return await drainEvent; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 64c636f41f1..f6732618330 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -116,6 +116,7 @@ export function onData( emitter.off('data', eventHandler); emitter.off('error', errorHandler); finished = true; + timeoutForSocketRead?.clear(); const doneResult = { value: undefined, done: finished } as const; for (const promise of unconsumedPromises) { diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 255a977a5f9..96d28d05584 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -243,7 +243,7 @@ export abstract class AbstractCursor< options.timeoutMode ?? (options.tailable ? CursorTimeoutMode.ITERATION : CursorTimeoutMode.LIFETIME); } else { - if (options.timeoutMode != null) + if (options.timeoutMode != null && options.timeoutContext == null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); } diff --git a/src/cursor/client_bulk_write_cursor.ts b/src/cursor/client_bulk_write_cursor.ts index 3a4e7eb99aa..72c73caad64 100644 --- a/src/cursor/client_bulk_write_cursor.ts +++ b/src/cursor/client_bulk_write_cursor.ts @@ -35,7 +35,7 @@ export class ClientBulkWriteCursor extends AbstractCursor { constructor( client: MongoClient, commandBuilder: ClientBulkWriteCommandBuilder, - options: ClientBulkWriteOptions = {} + options: ClientBulkWriteCursorOptions = {} ) { super(client, new MongoDBNamespace('admin', '$cmd'), options); @@ -72,7 +72,11 @@ export class ClientBulkWriteCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, clientBulkWriteOperation); + const response = await executeOperation( + this.client, + clientBulkWriteOperation, + this.timeoutContext + ); this.cursorResponse = response; return { server: clientBulkWriteOperation.server, session, response }; diff --git a/src/operations/client_bulk_write/executor.ts b/src/operations/client_bulk_write/executor.ts index 93acaac2160..6aac96aa631 100644 --- a/src/operations/client_bulk_write/executor.ts +++ b/src/operations/client_bulk_write/executor.ts @@ -1,3 +1,4 @@ +import { CursorTimeoutContext, CursorTimeoutMode } from '../../cursor/abstract_cursor'; import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor'; import { MongoClientBulkWriteError, @@ -5,6 +6,8 @@ import { MongoServerError } from '../../error'; import { type MongoClient } from '../../mongo_client'; +import { TimeoutContext } from '../../timeout'; +import { resolveTimeoutOptions } from '../../utils'; import { WriteConcern } from '../../write_concern'; import { executeOperation } from '../execute_operation'; import { ClientBulkWriteOperation } from './client_bulk_write'; @@ -70,17 +73,26 @@ export class ClientBulkWriteExecutor { pkFactory ); // Unacknowledged writes need to execute all batches and 
return { ok: 1} + const resolvedOptions = resolveTimeoutOptions(this.client, this.options); + const context = TimeoutContext.create(resolvedOptions); + if (this.options.writeConcern?.w === 0) { while (commandBuilder.hasNextBatch()) { const operation = new ClientBulkWriteOperation(commandBuilder, this.options); - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, context); } return { ok: 1 }; } else { const resultsMerger = new ClientBulkWriteResultsMerger(this.options); // For each command will will create and exhaust a cursor for the results. while (commandBuilder.hasNextBatch()) { - const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options); + const cursorContext = new CursorTimeoutContext(context, Symbol()); + const options = { + ...this.options, + timeoutContext: cursorContext, + ...(resolvedOptions.timeoutMS != null && { timeoutMode: CursorTimeoutMode.LIFETIME }) + }; + const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, options); try { await resultsMerger.merge(cursor); } catch (error) { diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 7ab2d9a043f..35a6f1de695 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -106,7 +106,7 @@ export type ServerEvents = { EventEmitterWithState; /** @internal */ -export type ServerCommandOptions = Omit & { +export type ServerCommandOptions = Omit & { timeoutContext: TimeoutContext; }; diff --git a/src/utils.ts b/src/utils.ts index 04174813c9c..15b3bab90f3 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -35,6 +35,7 @@ import { ServerType } from './sdam/common'; import type { Server } from './sdam/server'; import type { Topology } from './sdam/topology'; import type { ClientSession } from './sessions'; +import { type TimeoutContextOptions } from './timeout'; import { WriteConcern } from './write_concern'; /** @@ -514,6 +515,18 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { return keys.length > 0 && keys[0][0] === '$'; } +export function resolveTimeoutOptions>( + client: MongoClient, + options: T +): T & + Pick< + MongoClient['s']['options'], + 'timeoutMS' | 'serverSelectionTimeoutMS' | 'waitQueueTimeoutMS' | 'socketTimeoutMS' + > { + const { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS } = + client.s.options; + return { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS, ...options }; +} /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 80da92e10a3..458447a437c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -21,7 +21,8 @@ import { promiseWithResolvers, squashError } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { type FailPoint, makeMultiBatchWrite } from '../../tools/utils'; +import { filterForCommands } from '../shared'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -1183,9 +1184,9 @@ describe('CSOT spec prose tests', function () { }); }); - describe.skip( + describe( '11. 
Multi-batch bulkWrites', - { requires: { mongodb: '>=8.0', serverless: 'forbid' } }, + { requires: { mongodb: '>=8.0', serverless: 'forbid', topology: 'single' } }, function () { /** * ### 11. Multi-batch bulkWrites @@ -1245,9 +1246,6 @@ describe('CSOT spec prose tests', function () { } }; - let maxBsonObjectSize: number; - let maxMessageSizeBytes: number; - beforeEach(async function () { await internalClient .db('db') @@ -1256,29 +1254,20 @@ describe('CSOT spec prose tests', function () { .catch(() => null); await internalClient.db('admin').command(failpoint); - const hello = await internalClient.db('admin').command({ hello: 1 }); - maxBsonObjectSize = hello.maxBsonObjectSize; - maxMessageSizeBytes = hello.maxMessageSizeBytes; - client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); }); - it.skip('performs two bulkWrites which fail to complete before 2000 ms', async function () { + it('performs two bulkWrites which fail to complete before 2000 ms', async function () { const writes = []; - client.on('commandStarted', ev => writes.push(ev)); + client.on('commandStarted', filterForCommands('bulkWrite', writes)); - const length = maxMessageSizeBytes / maxBsonObjectSize + 1; - const models = Array.from({ length }, () => ({ - namespace: 'db.coll', - name: 'insertOne' as const, - document: { a: 'b'.repeat(maxBsonObjectSize - 500) } - })); + const models = await makeMultiBatchWrite(this.configuration); const error = await client.bulkWrite(models).catch(error => error); expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError); - expect(writes.map(ev => ev.commandName)).to.deep.equal(['bulkWrite', 'bulkWrite']); - }).skipReason = 'TODO(NODE-6403): client.bulkWrite is implemented in a follow up'; + expect(writes).to.have.lengthOf(2); + }); } ); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 68d7b16f54d..a981a9113df 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -279,12 +279,16 @@ describe('CSOT driver tests', metadata, () => { .stub(Connection.prototype, 'readMany') .callsFake(async function* (...args) { const realIterator = readManyStub.wrappedMethod.call(this, ...args); - const cmd = commandSpy.lastCall.args.at(1); - if ('giveMeWriteErrors' in cmd) { - await realIterator.next().catch(() => null); // dismiss response - yield { parse: () => writeErrorsReply }; - } else { - yield (await realIterator.next()).value; + try { + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + } finally { + realIterator.return(); } }); }); diff --git a/test/integration/collection-management/collection_db_management.test.ts b/test/integration/collection-management/collection_db_management.test.ts index f5c4c55cf05..0cb90b3b592 100644 --- a/test/integration/collection-management/collection_db_management.test.ts +++ b/test/integration/collection-management/collection_db_management.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { Collection, type Db, type MongoClient } from '../../mongodb'; +import { Collection, type Db, type MongoClient, ObjectId } from '../../mongodb'; describe('Collection Management and Db Management', function () { let client: 
MongoClient; @@ -16,7 +16,7 @@ describe('Collection Management and Db Management', function () { }); it('returns a collection object after calling createCollection', async function () { - const collection = await db.createCollection('collection'); + const collection = await db.createCollection(new ObjectId().toHexString()); expect(collection).to.be.instanceOf(Collection); }); diff --git a/test/integration/crud/client_bulk_write.test.ts b/test/integration/crud/client_bulk_write.test.ts new file mode 100644 index 00000000000..6177077b632 --- /dev/null +++ b/test/integration/crud/client_bulk_write.test.ts @@ -0,0 +1,384 @@ +import { expect } from 'chai'; +import { setTimeout } from 'timers/promises'; + +import { + type CommandStartedEvent, + type Connection, + type ConnectionPool, + type MongoClient, + MongoOperationTimeoutError, + now, + TimeoutContext +} from '../../mongodb'; +import { + clearFailPoint, + configureFailPoint, + makeMultiBatchWrite, + makeMultiResponseBatchModelArray +} from '../../tools/utils'; +import { filterForCommands } from '../shared'; + +const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=8.0', + serverless: 'forbid' + } +}; + +describe('Client Bulk Write', function () { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + await clearFailPoint(this.configuration); + }); + + describe('CSOT enabled', function () { + describe('when timeoutMS is set on the client', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 300 }); + await client.connect(); + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite([ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ]) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on the bulkWrite operation', function () { + beforeEach(async function () { + client = this.configuration.newClient({}); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on both the client and operation options', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('bulk write options take precedence over the client 
options', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe( + 'unacknowledged writes', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + function () { + let connection: Connection; + let pool: ConnectionPool; + + beforeEach(async function () { + client = this.configuration.newClient({}, { maxPoolSize: 1, waitQueueTimeoutMS: 2000 }); + + await client.connect(); + + pool = Array.from(client.topology.s.servers.values())[0].pool; + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + }); + + afterEach(async function () { + pool = Array.from(client.topology.s.servers.values())[0].pool; + pool.checkIn(connection); + await client.close(); + }); + + it('a single batch bulk write does not take longer than timeoutMS', async function () { + const start = now(); + let end; + const timeoutError = client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 200, writeConcern: { w: 0 } } + ) + .catch(e => e) + .then(e => { + end = now(); + return e; + }); + + await setTimeout(250); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(200 - 100, 200 + 100); + }); + + it( + 'timeoutMS applies to all batches', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + let end; + const timeoutError = client + .bulkWrite(models, { + timeoutMS: 400, + writeConcern: { w: 0 } + }) + .catch(e => e) + .then(r => { + end = now(); + return r; + }); + + await setTimeout(210); + + pool.checkIn(connection); + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + + await setTimeout(210); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(400 - 100, 400 + 100); + } + ); + } + ); + + describe('acknowledged writes', metadata, function () { + describe('when a bulk write command times out', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('the operation times out', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when the timeout is reached while iterating the result cursor', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { 
monitorCommands: true, minPoolSize: 5 }); + client.on('commandStarted', filterForCommands(['getMore'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1400, failCommands: ['getMore'] } + }); + }); + + it('the bulk write operation times out', metadata, async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + verboseResults: true, + timeoutMS: 1500 + }) + .catch(e => e); + + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + // DRIVERS-3005 - killCursors causes cursor cleanup to extend past timeoutMS. + // The amount of time killCursors takes is wildly variable and can take up to almost + // 600-700ms sometimes. + expect(end - start).to.be.within(1500, 1500 + 800); + expect(commands).to.have.lengthOf(1); + }); + }); + + describe('if the cursor encounters an error and a killCursors is sent', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands(['killCursors'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + blockConnection: true, + blockTimeMS: 3000, + failCommands: ['getMore', 'killCursors'] + } + }); + }); + + it( + 'timeoutMS is refreshed to the timeoutMS passed to the bulk write for the killCursors command', + metadata, + async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const timeoutError = await client + .bulkWrite(models, { ordered: true, timeoutMS: 2800, verboseResults: true }) + .catch(e => e); + + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + expect(maxTimeMS).to.be.greaterThan(1000); + } + ); + }); + + describe('when the bulk write is executed in multiple batches', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands('bulkWrite', commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { blockConnection: true, blockTimeMS: 1010, failCommands: ['bulkWrite'] } + }); + }); + + it( + 'timeoutMS applies to the duration of all batches', + { + requires: { + ...metadata.requires, + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + timeoutMS: 2000 + }) + .catch(e => e); + + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(2000 - 100, 2000 + 100); + expect(commands.length, 'Test must execute two batches.').to.equal(2); + } + ); + }); + }); + }); +}); diff --git a/test/tools/runner/config.ts b/test/tools/runner/config.ts index 1d637486226..16024638fba 100644 --- a/test/tools/runner/config.ts +++ b/test/tools/runner/config.ts @@ -7,6 +7,7 @@ import { type AuthMechanism, HostAddress, MongoClient, + type 
MongoClientOptions, type ServerApi, TopologyType, type WriteConcernSettings @@ -82,7 +83,7 @@ export class TestConfiguration { auth?: { username: string; password: string; authSource?: string }; proxyURIParams?: ProxyParams; }; - serverApi: ServerApi; + serverApi?: ServerApi; activeResources: number; isSrv: boolean; serverlessCredentials: { username: string | undefined; password: string | undefined }; @@ -171,13 +172,34 @@ export class TestConfiguration { return this.options.replicaSet; } + /** + * Returns a `hello`, executed against `uri`. + */ + async hello(uri = this.uri) { + const client = this.newClient(uri); + try { + await client.connect(); + const { maxBsonObjectSize, maxMessageSizeBytes, maxWriteBatchSize, ...rest } = await client + .db('admin') + .command({ hello: 1 }); + return { + maxBsonObjectSize, + maxMessageSizeBytes, + maxWriteBatchSize, + ...rest + }; + } finally { + await client.close(); + } + } + isOIDC(uri: string, env: string): boolean { if (!uri) return false; return uri.indexOf('MONGODB-OIDC') > -1 && uri.indexOf(`ENVIRONMENT:${env}`) > -1; } - newClient(urlOrQueryOptions?: string | Record, serverOptions?: Record) { - serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); + newClient(urlOrQueryOptions?: string | Record, serverOptions?: MongoClientOptions) { + serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); // Support MongoClient constructor form (url, options) for `newClient`. if (typeof urlOrQueryOptions === 'string') { diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 8614bd7d64c..8ebc5e8f532 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -11,6 +11,7 @@ import { setTimeout } from 'timers'; import { inspect, promisify } from 'util'; import { + type AnyClientBulkWriteModel, type Document, type HostAddress, MongoClient, @@ -18,6 +19,7 @@ import { Topology, type TopologyOptions } from '../mongodb'; +import { type TestConfiguration } from './runner/config'; import { runUnifiedSuite } from './unified-spec-runner/runner'; import { type CollectionData, @@ -598,3 +600,68 @@ export async function waitUntilPoolsFilled( await Promise.all([wait$(), client.connect()]); } + +export async function configureFailPoint(configuration: TestConfiguration, failPoint: FailPoint) { + const utilClient = configuration.newClient(); + await utilClient.connect(); + + try { + await utilClient.db('admin').command(failPoint); + } finally { + await utilClient.close(); + } +} + +export async function clearFailPoint(configuration: TestConfiguration) { + const utilClient = configuration.newClient(); + await utilClient.connect(); + + try { + await utilClient.db('admin').command({ + configureFailPoint: 'failCommand', + mode: 'off' + }); + } finally { + await utilClient.close(); + } +} + +export async function makeMultiBatchWrite( + configuration: TestConfiguration +): Promise { + const { maxBsonObjectSize, maxMessageSizeBytes } = await configuration.hello(); + + const length = maxMessageSizeBytes / maxBsonObjectSize + 1; + const models = Array.from({ length }, () => ({ + namespace: 'db.coll', + name: 'insertOne' as const, + document: { a: 'b'.repeat(maxBsonObjectSize - 500) } + })); + + return models; +} + +export async function makeMultiResponseBatchModelArray( + configuration: TestConfiguration +): Promise { + const { maxBsonObjectSize } = await configuration.hello(); + const namespace = `foo.${new BSON.ObjectId().toHexString()}`; + const models: AnyClientBulkWriteModel[] = [ + { + name: 'updateOne', + namespace, + update: { 
$set: { age: 1 } }, + upsert: true, + filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) } + }, + { + name: 'updateOne', + namespace, + update: { $set: { age: 1 } }, + upsert: true, + filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) } + } + ]; + + return models; +} From dd8f4b2afb866a76d2e55f7102cb08320df4b3d0 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 14 Oct 2024 14:19:48 -0400 Subject: [PATCH 111/136] add back newline --- src/operations/client_bulk_write/client_bulk_write.ts | 1 + src/operations/find.ts | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/operations/client_bulk_write/client_bulk_write.ts b/src/operations/client_bulk_write/client_bulk_write.ts index 5b174c16197..26d1e7bb60f 100644 --- a/src/operations/client_bulk_write/client_bulk_write.ts +++ b/src/operations/client_bulk_write/client_bulk_write.ts @@ -90,6 +90,7 @@ export class ClientBulkWriteOperation extends CommandOperation */ oplogReplay?: boolean; - /** * Specifies the verbosity mode for the explain output. * @deprecated This API is deprecated in favor of `collection.find().explain()`. From d773ae3d36c353e76f7d9ed8590871a9023b9fa0 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 14 Oct 2024 14:20:31 -0400 Subject: [PATCH 112/136] remove extra newline --- src/operations/aggregate.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/operations/aggregate.ts b/src/operations/aggregate.ts index 2a18332ed78..0e9fbb0b846 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -45,7 +45,6 @@ export interface AggregateOptions extends Omit Date: Mon, 14 Oct 2024 14:22:09 -0400 Subject: [PATCH 113/136] more newline from rebase --- .../client_side_operations_timeout.prose.test.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 87a36c87e31..458447a437c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -762,7 +762,6 @@ describe('CSOT spec prose tests', function () { 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; }); - it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. @@ -811,8 +810,6 @@ describe('CSOT spec prose tests', function () { }).skipReason = 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - - it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { /** * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. From 05bee51a2aec5ae22a67af8d9c4191c6e1a73a28 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 14 Oct 2024 15:58:41 -0400 Subject: [PATCH 114/136] ci please work... 
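For reference, the failpoint and batching helpers added above (configureFailPoint, clearFailPoint, makeMultiBatchWrite) are combined in the new bulk-write CSOT tests roughly as in the following minimal sketch. The helper name and exact import paths are illustrative only, assuming the integration-test directory layout used elsewhere in this series; `configuration` is the suite's TestConfiguration and `client` is an ordinary connected MongoClient.

import { type MongoClient, MongoOperationTimeoutError } from '../../mongodb';
import { type TestConfiguration } from '../../tools/runner/config';
import { clearFailPoint, configureFailPoint, makeMultiBatchWrite } from '../../tools/utils';

async function expectMultiBatchTimeout(configuration: TestConfiguration, client: MongoClient) {
  // Block both bulkWrite batches long enough that they cannot complete within timeoutMS.
  await configureFailPoint(configuration, {
    configureFailPoint: 'failCommand',
    mode: { times: 2 },
    data: { blockConnection: true, blockTimeMS: 1010, failCommands: ['bulkWrite'] }
  });
  try {
    // makeMultiBatchWrite builds enough near-maxBsonObjectSize documents to force two batches.
    const models = await makeMultiBatchWrite(configuration);
    const error = await client.bulkWrite(models, { timeoutMS: 2000 }).catch(e => e);
    return error instanceof MongoOperationTimeoutError;
  } finally {
    await clearFailPoint(configuration);
  }
}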
--- 670d70d52c1694928576fe47 | 1 + 670d711ae3d0977ee33ddd60 | 1 + 670d7133d8aac94602eb5bbc | 1 + 670d71493bcf2fb9275993d6 | 1 + 670d716849b93fb4055d3cec | 1 + 670d74814911d80d2371aad8 | 1 + 670d74900242c0f2c4d1eb46 | 1 + .../client_side_encryption.test.ts | 78 ++++--------------- ...ient_side_operations_timeout.prose.test.ts | 17 ++-- ...lient_side_operations_timeout.unit.test.ts | 2 +- 10 files changed, 33 insertions(+), 71 deletions(-) create mode 100644 670d70d52c1694928576fe47 create mode 100644 670d711ae3d0977ee33ddd60 create mode 100644 670d7133d8aac94602eb5bbc create mode 100644 670d71493bcf2fb9275993d6 create mode 100644 670d716849b93fb4055d3cec create mode 100644 670d74814911d80d2371aad8 create mode 100644 670d74900242c0f2c4d1eb46 diff --git a/670d70d52c1694928576fe47 b/670d70d52c1694928576fe47 new file mode 100644 index 00000000000..079e10f1f68 --- /dev/null +++ b/670d70d52c1694928576fe47 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 71006 } \ No newline at end of file diff --git a/670d711ae3d0977ee33ddd60 b/670d711ae3d0977ee33ddd60 new file mode 100644 index 00000000000..22928f8a764 --- /dev/null +++ b/670d711ae3d0977ee33ddd60 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 71606 } \ No newline at end of file diff --git a/670d7133d8aac94602eb5bbc b/670d7133d8aac94602eb5bbc new file mode 100644 index 00000000000..2cd7d1a9af3 --- /dev/null +++ b/670d7133d8aac94602eb5bbc @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 71850 } \ No newline at end of file diff --git a/670d71493bcf2fb9275993d6 b/670d71493bcf2fb9275993d6 new file mode 100644 index 00000000000..de2583492a0 --- /dev/null +++ b/670d71493bcf2fb9275993d6 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 72074 } \ No newline at end of file diff --git a/670d716849b93fb4055d3cec b/670d716849b93fb4055d3cec new file mode 100644 index 00000000000..9d5cbb92cb4 --- /dev/null +++ b/670d716849b93fb4055d3cec @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 72348 } \ No newline at end of file diff --git a/670d74814911d80d2371aad8 b/670d74814911d80d2371aad8 new file mode 100644 index 00000000000..18e01a29f34 --- /dev/null +++ b/670d74814911d80d2371aad8 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 78693 } \ No newline at end of file diff --git a/670d74900242c0f2c4d1eb46 b/670d74900242c0f2c4d1eb46 new file mode 100644 index 00000000000..a3909aef096 --- /dev/null +++ b/670d74900242c0f2c4d1eb46 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 78869 } \ No newline at end of file diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index 4373f92075b..f925bd1ea66 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -31,7 +31,8 @@ describe('Client-Side Encryption (Integration)', function () { mode: 'alwaysOn', data: { failCommands: ['aggregate'], - errorCode: 89 + blockConnection: true, + blockTimeMS: 2000 } } as FailPoint); }); @@ -42,11 +43,7 @@ describe('Client-Side Encryption (Integration)', function () { .admin() .command({ configureFailPoint: 'failCommand', - mode: 'off', - data: { - failCommands: ['aggregate'], - errorCode: 89 - } + mode: 'off' } as FailPoint); await setupClient.close(); }); @@ -59,9 +56,8 @@ describe('Client-Side Encryption (Integration)', function () { {}, { autoEncryption: { - keyVaultNamespace: 'admin.datakeys', + keyVaultNamespace: 'data.datakeys', kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, local: { key: 
Buffer.alloc(96) } } }, @@ -95,7 +91,6 @@ describe('Client-Side Encryption (Integration)', function () { autoEncryption: { keyVaultNamespace: 'admin.datakeys', kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, local: { key: Buffer.alloc(96) } } } @@ -107,14 +102,14 @@ describe('Client-Side Encryption (Integration)', function () { encryptedClient.close(); }); - it('the command should fail due to a server error', async function () { + it('the command should not fail', async function () { const err = await encryptedClient .db('test') .collection('test') .aggregate([]) .toArray() .catch(e => e); - expect(err).to.be.instanceOf(MongoServerError); + expect(err).to.deep.equal([]); }); }); }); @@ -137,21 +132,6 @@ describe('Client-Side Encryption (Integration)', function () { encryptedClient = this.configuration.newClient( {}, { - autoEncryption: { - extraOptions: { - mongocryptdBypassSpawn: true, - mongocryptdURI: 'mongodb://localhost:27017/db?serverSelectionTimeoutMS=1000', - mongocryptdSpawnArgs: [ - '--pidfilepath=bypass-spawning-mongocryptd.pid', - '--port=27017' - ] - }, - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - }, timeoutMS: 500 } ); @@ -184,25 +164,14 @@ describe('Client-Side Encryption (Integration)', function () { }); }); - context('when not provided timeoutContext and command hangs', function () { + context.skip('when not provided timeoutContext and command hangs', function () { let encryptedClient; let clock: sinon.SinonFakeTimers; let timerSandbox: sinon.SinonSandbox; let sleep; beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - } - } - ); + encryptedClient = this.configuration.newClient(); await encryptedClient.connect(); timerSandbox = createTimerSandbox(); clock = sinon.useFakeTimers(); @@ -255,7 +224,8 @@ describe('Client-Side Encryption (Integration)', function () { mode: 'alwaysOn', data: { failCommands: ['find'], - errorCode: 89 + blockConnection: true, + blockTimeMS: 2000 } } as FailPoint); }); @@ -266,11 +236,7 @@ describe('Client-Side Encryption (Integration)', function () { .admin() .command({ configureFailPoint: 'failCommand', - mode: 'off', - data: { - failCommands: ['find'], - errorCode: 89 - } + mode: 'off' } as FailPoint); await setupClient.close(); }); @@ -282,13 +248,6 @@ describe('Client-Side Encryption (Integration)', function () { encryptedClient = this.configuration.newClient( {}, { - autoEncryption: { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - }, timeoutMS: 1000 } ); @@ -307,22 +266,11 @@ describe('Client-Side Encryption (Integration)', function () { }); }); - context('when not provided timeoutContext and command hangs', function () { + context.skip('when not provided timeoutContext and command hangs', function () { let encryptedClient; beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - } - } - ); + encryptedClient = this.configuration.newClient(); await 
encryptedClient.connect(); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 458447a437c..0152f8374a2 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -125,14 +125,20 @@ describe('CSOT spec prose tests', function () { let childProcess: ChildProcess; beforeEach(async function () { - childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { - stdio: 'ignore', - detached: true - }); + childProcess = spawn( + 'mongocryptd', + ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', new ObjectId().toHexString()], + { + stdio: 'ignore', + detached: true + } + ); childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}/?timeoutMS=1000`, { - monitorCommands: true + family: 6, + monitorCommands: true, + serverSelectionTimeoutMS: 2000 }); }); @@ -145,6 +151,7 @@ describe('CSOT spec prose tests', function () { it('maxTimeMS is not set', async function () { const commandStarted = []; client.on('commandStarted', ev => commandStarted.push(ev)); + await client.connect(); await client .db('admin') .command({ ping: 1 }) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 2755dbb8996..473143ca76a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -183,7 +183,7 @@ describe('CSOT spec unit tests', function () { }); }); - describe('Auto Encryption', function () { + describe('Auto Encryption', { requires: { mongodb: '>=4.2' } }, function () { context('when provided timeoutMS and command hangs', function () { let encryptedClient; From e58772b127ca7dcaf6416c6cb1bdac14e44f5748 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 14 Oct 2024 15:59:29 -0400 Subject: [PATCH 115/136] delete extraneous folder --- 670d70d52c1694928576fe47 | 1 - 670d711ae3d0977ee33ddd60 | 1 - 670d7133d8aac94602eb5bbc | 1 - 670d71493bcf2fb9275993d6 | 1 - 670d716849b93fb4055d3cec | 1 - 670d74814911d80d2371aad8 | 1 - 670d74900242c0f2c4d1eb46 | 1 - 7 files changed, 7 deletions(-) delete mode 100644 670d70d52c1694928576fe47 delete mode 100644 670d711ae3d0977ee33ddd60 delete mode 100644 670d7133d8aac94602eb5bbc delete mode 100644 670d71493bcf2fb9275993d6 delete mode 100644 670d716849b93fb4055d3cec delete mode 100644 670d74814911d80d2371aad8 delete mode 100644 670d74900242c0f2c4d1eb46 diff --git a/670d70d52c1694928576fe47 b/670d70d52c1694928576fe47 deleted file mode 100644 index 079e10f1f68..00000000000 --- a/670d70d52c1694928576fe47 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 71006 } \ No newline at end of file diff --git a/670d711ae3d0977ee33ddd60 b/670d711ae3d0977ee33ddd60 deleted file mode 100644 index 22928f8a764..00000000000 --- a/670d711ae3d0977ee33ddd60 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 71606 } \ No newline at end of file diff --git a/670d7133d8aac94602eb5bbc b/670d7133d8aac94602eb5bbc deleted file mode 100644 index 2cd7d1a9af3..00000000000 --- 
a/670d7133d8aac94602eb5bbc +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 71850 } \ No newline at end of file diff --git a/670d71493bcf2fb9275993d6 b/670d71493bcf2fb9275993d6 deleted file mode 100644 index de2583492a0..00000000000 --- a/670d71493bcf2fb9275993d6 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 72074 } \ No newline at end of file diff --git a/670d716849b93fb4055d3cec b/670d716849b93fb4055d3cec deleted file mode 100644 index 9d5cbb92cb4..00000000000 --- a/670d716849b93fb4055d3cec +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 72348 } \ No newline at end of file diff --git a/670d74814911d80d2371aad8 b/670d74814911d80d2371aad8 deleted file mode 100644 index 18e01a29f34..00000000000 --- a/670d74814911d80d2371aad8 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 78693 } \ No newline at end of file diff --git a/670d74900242c0f2c4d1eb46 b/670d74900242c0f2c4d1eb46 deleted file mode 100644 index a3909aef096..00000000000 --- a/670d74900242c0f2c4d1eb46 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 78869 } \ No newline at end of file From 0296525710537bc1c3d3bb1f0207ef5a9fe56cb7 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 10:26:40 -0400 Subject: [PATCH 116/136] remove test with unclosed socket --- 670d8d60676d0ec9386c71b4 | 1 + 670d8d8571cb6349704ac024 | 1 + 670d95e7bfc4a1132b20cc1b | 1 + 670e7980a23cb2dfc5f5306e | 1 + 670e79b34b65964675c60275 | 1 + 670e7a47951b2e34f81009d0 | 1 + 670e7abf9b9344beca0aebbf | 1 + 670e7b0cfdee58cac79e4eb5 | 1 + 670e7b8ae0ac0bfc2a30503d | 1 + .../client_side_encryption.test.ts | 66 ++----------------- ...lient_side_operations_timeout.unit.test.ts | 56 ---------------- test/tools/runner/hooks/leak_checker.ts | 2 +- 12 files changed, 15 insertions(+), 118 deletions(-) create mode 100644 670d8d60676d0ec9386c71b4 create mode 100644 670d8d8571cb6349704ac024 create mode 100644 670d95e7bfc4a1132b20cc1b create mode 100644 670e7980a23cb2dfc5f5306e create mode 100644 670e79b34b65964675c60275 create mode 100644 670e7a47951b2e34f81009d0 create mode 100644 670e7abf9b9344beca0aebbf create mode 100644 670e7b0cfdee58cac79e4eb5 create mode 100644 670e7b8ae0ac0bfc2a30503d diff --git a/670d8d60676d0ec9386c71b4 b/670d8d60676d0ec9386c71b4 new file mode 100644 index 00000000000..69584e1502a --- /dev/null +++ b/670d8d60676d0ec9386c71b4 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 94104 } \ No newline at end of file diff --git a/670d8d8571cb6349704ac024 b/670d8d8571cb6349704ac024 new file mode 100644 index 00000000000..1424d780567 --- /dev/null +++ b/670d8d8571cb6349704ac024 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 94423 } \ No newline at end of file diff --git a/670d95e7bfc4a1132b20cc1b b/670d95e7bfc4a1132b20cc1b new file mode 100644 index 00000000000..9a1b40fd9dc --- /dev/null +++ b/670d95e7bfc4a1132b20cc1b @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 5161 } \ No newline at end of file diff --git a/670e7980a23cb2dfc5f5306e b/670e7980a23cb2dfc5f5306e new file mode 100644 index 00000000000..ec2a5d86ae9 --- /dev/null +++ b/670e7980a23cb2dfc5f5306e @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 52103 } \ No newline at end of file diff --git a/670e79b34b65964675c60275 b/670e79b34b65964675c60275 new file mode 100644 index 00000000000..4f00ef3ea12 --- /dev/null +++ b/670e79b34b65964675c60275 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 52520 } \ No newline at end of file diff --git a/670e7a47951b2e34f81009d0 b/670e7a47951b2e34f81009d0 new file mode 100644 index 00000000000..79120919c4b --- /dev/null +++ b/670e7a47951b2e34f81009d0 @@ -0,0 +1 @@ +{ 
"port" : 23000, "pid" : 53385 } \ No newline at end of file diff --git a/670e7abf9b9344beca0aebbf b/670e7abf9b9344beca0aebbf new file mode 100644 index 00000000000..3f849ff897b --- /dev/null +++ b/670e7abf9b9344beca0aebbf @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 54282 } \ No newline at end of file diff --git a/670e7b0cfdee58cac79e4eb5 b/670e7b0cfdee58cac79e4eb5 new file mode 100644 index 00000000000..394b03e3ec7 --- /dev/null +++ b/670e7b0cfdee58cac79e4eb5 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 54971 } \ No newline at end of file diff --git a/670e7b8ae0ac0bfc2a30503d b/670e7b8ae0ac0bfc2a30503d new file mode 100644 index 00000000000..ef91dc76ed9 --- /dev/null +++ b/670e7b8ae0ac0bfc2a30503d @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 56197 } \ No newline at end of file diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index f925bd1ea66..6547080d160 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -1,24 +1,14 @@ -import { setTimeout } from 'node:timers/promises'; -import { promisify } from 'node:util'; - import { expect } from 'chai'; import * as sinon from 'sinon'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; -import { - BSON, - Connection, - CSOTTimeoutContext, - MongoOperationTimeoutError, - MongoServerError -} from '../../mongodb'; +import { BSON, Connection, CSOTTimeoutContext, MongoOperationTimeoutError } from '../../mongodb'; import { type FailPoint, sleep } from '../../tools/utils'; -import { createTimerSandbox } from '../../unit/timer_sandbox'; describe('Client-Side Encryption (Integration)', function () { - describe('CSOT', function () { - describe('Auto encryption', { requires: { mongodb: '>=4.2' } }, function () { + describe('CSOT', { requires: { mongodb: '>=4.2' } }, function () { + describe('Auto encryption', function () { let setupClient; beforeEach(async function () { @@ -163,52 +153,6 @@ describe('Client-Side Encryption (Integration)', function () { expect(err).to.be.instanceOf(MongoOperationTimeoutError); }); }); - - context.skip('when not provided timeoutContext and command hangs', function () { - let encryptedClient; - let clock: sinon.SinonFakeTimers; - let timerSandbox: sinon.SinonSandbox; - let sleep; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient(); - await encryptedClient.connect(); - timerSandbox = createTimerSandbox(); - clock = sinon.useFakeTimers(); - sleep = promisify(setTimeout); - const stub = sinon - // @ts-expect-error accessing private method - .stub(Connection.prototype, 'sendCommand') - .callsFake(async function* (...args) { - await sleep(1000); - yield* stub.wrappedMethod.call(this, ...args); - }); - }); - - afterEach(async function () { - if (clock) { - timerSandbox.restore(); - clock.restore(); - clock = undefined; - } - await encryptedClient?.close(); - }); - - it('the command should not fail due to a timeout error within 30 seconds', async function () { - const sleepingFn = async () => { - await sleep(30000); - throw Error('Slept for 30s'); - }; - - const err$ = Promise.all([ - stateMachine.markCommand(encryptedClient, 'test.test', BSON.serialize({ ping: 1 })), - sleepingFn() - ]).catch(e => e); - clock.tick(30000); - const err = await err$; - expect(err.message).to.equal('Slept for 30s'); - 
}); - }); }); describe('#fetchKeys', function () { @@ -266,7 +210,7 @@ describe('Client-Side Encryption (Integration)', function () { }); }); - context.skip('when not provided timeoutContext and command hangs', function () { + context('when not provided timeoutContext and command hangs', function () { let encryptedClient; beforeEach(async function () { @@ -282,7 +226,7 @@ describe('Client-Side Encryption (Integration)', function () { const err = await stateMachine .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })) .catch(e => e); - expect(err).to.be.instanceOf(MongoServerError); + expect(err).to.deep.equal([]); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 473143ca76a..068e5cabf62 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -233,62 +233,6 @@ describe('CSOT spec unit tests', function () { expect(err).to.be.instanceOf(MongoOperationTimeoutError); }); }); - - context('when not provided timeoutMS and command hangs', function () { - let encryptedClient; - let clock: sinon.SinonFakeTimers; - let timerSandbox: sinon.SinonSandbox; - let sleep; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - } - } - ); - await encryptedClient.connect(); - timerSandbox = createTimerSandbox(); - clock = sinon.useFakeTimers(); - sleep = promisify(setTimeout); - const stub = sinon - // @ts-expect-error accessing private method - .stub(Connection.prototype, 'sendCommand') - .callsFake(async function* (...args) { - await sleep(1000); - yield* stub.wrappedMethod.call(this, ...args); - }); - }); - - afterEach(async function () { - if (clock) { - timerSandbox.restore(); - clock.restore(); - clock = undefined; - } - await encryptedClient?.close(); - }); - - it('the command should not fail due to a timeout error within 30 seconds', async function () { - const sleepingFn = async () => { - await sleep(30000); - throw Error('Slept for 30s'); - }; - - const err$ = Promise.all([encryptedClient.db().command({ ping: 1 }), sleepingFn()]).catch( - e => e - ); - clock.tick(30000); - const err = await err$; - expect(err.message).to.equal('Slept for 30s'); - }); - }); }); }); diff --git a/test/tools/runner/hooks/leak_checker.ts b/test/tools/runner/hooks/leak_checker.ts index 4f53c031dab..51982f54179 100644 --- a/test/tools/runner/hooks/leak_checker.ts +++ b/test/tools/runner/hooks/leak_checker.ts @@ -140,7 +140,7 @@ const leakCheckerAfterEach = async function () { } }; -const TRACE_SOCKETS = process.env.TRACE_SOCKETS === 'true' ? true : false; +const TRACE_SOCKETS = true; //process.env.TRACE_SOCKETS === 'true' ? 
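The state-machine checks above construct the driver's internal CSOTTimeoutContext directly; the pattern they exercise looks roughly like the following sketch. The helper name is illustrative, the imports mirror the test files in this series, and it assumes `client` is already connected.

// eslint-disable-next-line @typescript-eslint/no-restricted-imports
import { StateMachine } from '../../../src/client-side-encryption/state_machine';
import { BSON, CSOTTimeoutContext, type MongoClient } from '../../mongodb';

async function fetchKeysWithDeadline(client: MongoClient) {
  const stateMachine = new StateMachine({} as any);
  // timeoutMS bounds the whole key fetch; serverSelectionTimeoutMS is applied separately.
  const timeoutContext = new CSOTTimeoutContext({ timeoutMS: 500, serverSelectionTimeoutMS: 30000 });

  // With a context, a blocked `find` against the key vault surfaces as MongoOperationTimeoutError;
  // omitting the fourth argument leaves fetchKeys waiting on the server instead of timing out client-side.
  return await stateMachine.fetchKeys(client, 'test.test', BSON.serialize({ a: 1 }), timeoutContext);
}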
true : false; const kSocketId = Symbol('socketId'); const originalCreateConnection = net.createConnection; let socketCounter = 0n; From 6af0aa47406b3e54e3247b650fa1a5d6f134fd65 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 10:27:14 -0400 Subject: [PATCH 117/136] remove test with unclosed socket --- 670d8d60676d0ec9386c71b4 | 1 - 670d8d8571cb6349704ac024 | 1 - 670d95e7bfc4a1132b20cc1b | 1 - 670e7980a23cb2dfc5f5306e | 1 - 670e79b34b65964675c60275 | 1 - 670e7a47951b2e34f81009d0 | 1 - 670e7abf9b9344beca0aebbf | 1 - 670e7b0cfdee58cac79e4eb5 | 1 - 670e7b8ae0ac0bfc2a30503d | 1 - 9 files changed, 9 deletions(-) delete mode 100644 670d8d60676d0ec9386c71b4 delete mode 100644 670d8d8571cb6349704ac024 delete mode 100644 670d95e7bfc4a1132b20cc1b delete mode 100644 670e7980a23cb2dfc5f5306e delete mode 100644 670e79b34b65964675c60275 delete mode 100644 670e7a47951b2e34f81009d0 delete mode 100644 670e7abf9b9344beca0aebbf delete mode 100644 670e7b0cfdee58cac79e4eb5 delete mode 100644 670e7b8ae0ac0bfc2a30503d diff --git a/670d8d60676d0ec9386c71b4 b/670d8d60676d0ec9386c71b4 deleted file mode 100644 index 69584e1502a..00000000000 --- a/670d8d60676d0ec9386c71b4 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 94104 } \ No newline at end of file diff --git a/670d8d8571cb6349704ac024 b/670d8d8571cb6349704ac024 deleted file mode 100644 index 1424d780567..00000000000 --- a/670d8d8571cb6349704ac024 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 94423 } \ No newline at end of file diff --git a/670d95e7bfc4a1132b20cc1b b/670d95e7bfc4a1132b20cc1b deleted file mode 100644 index 9a1b40fd9dc..00000000000 --- a/670d95e7bfc4a1132b20cc1b +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 5161 } \ No newline at end of file diff --git a/670e7980a23cb2dfc5f5306e b/670e7980a23cb2dfc5f5306e deleted file mode 100644 index ec2a5d86ae9..00000000000 --- a/670e7980a23cb2dfc5f5306e +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 52103 } \ No newline at end of file diff --git a/670e79b34b65964675c60275 b/670e79b34b65964675c60275 deleted file mode 100644 index 4f00ef3ea12..00000000000 --- a/670e79b34b65964675c60275 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 52520 } \ No newline at end of file diff --git a/670e7a47951b2e34f81009d0 b/670e7a47951b2e34f81009d0 deleted file mode 100644 index 79120919c4b..00000000000 --- a/670e7a47951b2e34f81009d0 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 53385 } \ No newline at end of file diff --git a/670e7abf9b9344beca0aebbf b/670e7abf9b9344beca0aebbf deleted file mode 100644 index 3f849ff897b..00000000000 --- a/670e7abf9b9344beca0aebbf +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 54282 } \ No newline at end of file diff --git a/670e7b0cfdee58cac79e4eb5 b/670e7b0cfdee58cac79e4eb5 deleted file mode 100644 index 394b03e3ec7..00000000000 --- a/670e7b0cfdee58cac79e4eb5 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 54971 } \ No newline at end of file diff --git a/670e7b8ae0ac0bfc2a30503d b/670e7b8ae0ac0bfc2a30503d deleted file mode 100644 index ef91dc76ed9..00000000000 --- a/670e7b8ae0ac0bfc2a30503d +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 56197 } \ No newline at end of file From 0031477ebabdca51856f2741c9662f831ddbc2d6 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 10:28:38 -0400 Subject: [PATCH 118/136] remove misc change --- test/tools/runner/hooks/leak_checker.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/tools/runner/hooks/leak_checker.ts 
b/test/tools/runner/hooks/leak_checker.ts index 51982f54179..4f53c031dab 100644 --- a/test/tools/runner/hooks/leak_checker.ts +++ b/test/tools/runner/hooks/leak_checker.ts @@ -140,7 +140,7 @@ const leakCheckerAfterEach = async function () { } }; -const TRACE_SOCKETS = true; //process.env.TRACE_SOCKETS === 'true' ? true : false; +const TRACE_SOCKETS = process.env.TRACE_SOCKETS === 'true' ? true : false; const kSocketId = Symbol('socketId'); const originalCreateConnection = net.createConnection; let socketCounter = 0n; From f565e9f9f968e21f8c20f15bcb1b947e467bc63a Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 13:59:55 -0400 Subject: [PATCH 119/136] fix metadata logic --- 670e7f27a9590f8cad881208 | 1 + .../client_side_encryption.test.ts | 70 +++++++++++-------- 2 files changed, 40 insertions(+), 31 deletions(-) create mode 100644 670e7f27a9590f8cad881208 diff --git a/670e7f27a9590f8cad881208 b/670e7f27a9590f8cad881208 new file mode 100644 index 00000000000..8f4578b94e2 --- /dev/null +++ b/670e7f27a9590f8cad881208 @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 61530 } \ No newline at end of file diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index 6547080d160..462a04225eb 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -7,7 +7,7 @@ import { BSON, Connection, CSOTTimeoutContext, MongoOperationTimeoutError } from import { type FailPoint, sleep } from '../../tools/utils'; describe('Client-Side Encryption (Integration)', function () { - describe('CSOT', { requires: { mongodb: '>=4.2' } }, function () { + describe('CSOT', function () { describe('Auto encryption', function () { let setupClient; @@ -38,39 +38,47 @@ describe('Client-Side Encryption (Integration)', function () { await setupClient.close(); }); - context('when client is provided timeoutMS and command hangs', function () { - let encryptedClient; + context( + 'when client is provided timeoutMS and command hangs', + { requires: { mongodb: '>=4.2' } }, + function () { + let encryptedClient; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'data.datakeys', - kmsProviders: { - local: { key: Buffer.alloc(96) } - } - }, - timeoutMS: 1000 - } - ); - await encryptedClient.connect(); - }); + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'data.datakeys', + kmsProviders: { + local: { key: Buffer.alloc(96) } + } + }, + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); + }); - afterEach(async function () { - await encryptedClient.close(); - }); + afterEach(async function () { + await encryptedClient.close(); + }); - it('the command should fail due to a timeout error', async function () { - const err = await encryptedClient - .db('test') - .collection('test') - .aggregate([]) - .toArray() - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - }); - }); + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2' } }, + async function () { + const err = await encryptedClient + .db('test') + .collection('test') + .aggregate([]) + .toArray() + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + } + ); context('when client is not provided timeoutMS and 
command hangs', function () { let encryptedClient; From 15c3d726db6091b2cba2bf005806b19f7473d428 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 14:31:10 -0400 Subject: [PATCH 120/136] fix skippes test rquirement --- .../client_side_encryption.test.ts | 76 +++++++++---------- 1 file changed, 36 insertions(+), 40 deletions(-) diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index 462a04225eb..e40ed362b7b 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -6,7 +6,7 @@ import { StateMachine } from '../../../src/client-side-encryption/state_machine' import { BSON, Connection, CSOTTimeoutContext, MongoOperationTimeoutError } from '../../mongodb'; import { type FailPoint, sleep } from '../../tools/utils'; -describe('Client-Side Encryption (Integration)', function () { +describe.only('Client-Side Encryption (Integration)', function () { describe('CSOT', function () { describe('Auto encryption', function () { let setupClient; @@ -38,47 +38,43 @@ describe('Client-Side Encryption (Integration)', function () { await setupClient.close(); }); - context( - 'when client is provided timeoutMS and command hangs', - { requires: { mongodb: '>=4.2' } }, - function () { - let encryptedClient; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'data.datakeys', - kmsProviders: { - local: { key: Buffer.alloc(96) } - } - }, - timeoutMS: 1000 - } - ); - await encryptedClient.connect(); - }); - - afterEach(async function () { - await encryptedClient.close(); - }); + context('when client is provided timeoutMS and command hangs', function () { + let encryptedClient; - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2' } }, - async function () { - const err = await encryptedClient - .db('test') - .collection('test') - .aggregate([]) - .toArray() - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultNamespace: 'data.datakeys', + kmsProviders: { + local: { key: Buffer.alloc(96) } + } + }, + timeoutMS: 1000 } ); - } - ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient.close(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const err = await encryptedClient + .db('test') + .collection('test') + .aggregate([]) + .toArray() + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + }); context('when client is not provided timeoutMS and command hangs', function () { let encryptedClient; @@ -100,7 +96,7 @@ describe('Client-Side Encryption (Integration)', function () { encryptedClient.close(); }); - it('the command should not fail', async function () { + it('the command should not fail', { requires: { mongodb: '>=4.2.0' } }, async function () { const err = await encryptedClient .db('test') .collection('test') From 92cc6dda57efcd3fbd709824d7e7c35bee558985 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 15:23:35 -0400 Subject: [PATCH 121/136] lint fix --- .../client_side_encryption.test.ts | 60 +++++++++++-------- 1 file changed, 36 insertions(+), 24 
deletions(-) diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts index e40ed362b7b..cae35c561e9 100644 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.test.ts @@ -6,7 +6,7 @@ import { StateMachine } from '../../../src/client-side-encryption/state_machine' import { BSON, Connection, CSOTTimeoutContext, MongoOperationTimeoutError } from '../../mongodb'; import { type FailPoint, sleep } from '../../tools/utils'; -describe.only('Client-Side Encryption (Integration)', function () { +describe('Client-Side Encryption (Integration)', function () { describe('CSOT', function () { describe('Auto encryption', function () { let setupClient; @@ -145,17 +145,21 @@ describe.only('Client-Side Encryption (Integration)', function () { sinon.restore(); }); - it('the command should fail due to a timeout error', async function () { - const err = await stateMachine - .markCommand( - encryptedClient, - 'test.test', - BSON.serialize({ ping: 1 }), - timeoutContext() - ) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - }); + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const err = await stateMachine + .markCommand( + encryptedClient, + 'test.test', + BSON.serialize({ ping: 1 }), + timeoutContext() + ) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); }); }); @@ -206,12 +210,16 @@ describe.only('Client-Side Encryption (Integration)', function () { await encryptedClient?.close(); }); - it('the command should fail due to a timeout error', async function () { - const err = await stateMachine - .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - }); + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const err = await stateMachine + .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); }); context('when not provided timeoutContext and command hangs', function () { @@ -226,12 +234,16 @@ describe.only('Client-Side Encryption (Integration)', function () { await encryptedClient?.close(); }); - it('the command should fail due to a server error', async function () { - const err = await stateMachine - .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })) - .catch(e => e); - expect(err).to.deep.equal([]); - }); + it( + 'the command should fail due to a server error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const err = await stateMachine + .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })) + .catch(e => e); + expect(err).to.deep.equal([]); + } + ); }); }); }); From 08056e86031d41d7876a61df108ecc5e90d7f3c5 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 16:14:58 -0400 Subject: [PATCH 122/136] final test.. 
--- 670ecd3219d37592acd3c59a | 1 + ...lient_side_operations_timeout.unit.test.ts | 20 +++++++++++-------- 2 files changed, 13 insertions(+), 8 deletions(-) create mode 100644 670ecd3219d37592acd3c59a diff --git a/670ecd3219d37592acd3c59a b/670ecd3219d37592acd3c59a new file mode 100644 index 00000000000..c9439950c6e --- /dev/null +++ b/670ecd3219d37592acd3c59a @@ -0,0 +1 @@ +{ "port" : 23000, "pid" : 43126 } \ No newline at end of file diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 068e5cabf62..a7096a71bcf 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -183,7 +183,7 @@ describe('CSOT spec unit tests', function () { }); }); - describe('Auto Encryption', { requires: { mongodb: '>=4.2' } }, function () { + describe('Auto Encryption', function () { context('when provided timeoutMS and command hangs', function () { let encryptedClient; @@ -225,13 +225,17 @@ describe('CSOT spec unit tests', function () { sinon.restore(); }); - it('the command should fail due to a timeout error', async function () { - const err = await encryptedClient - .db() - .command({ ping: 1 }) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - }); + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2' } }, + async function () { + const err = await encryptedClient + .db() + .command({ ping: 1 }) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); }); }); }); From fa6626838f12edc5da474a7edb0149cb2d586e35 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 15 Oct 2024 16:25:50 -0400 Subject: [PATCH 123/136] temp --- 670e7f27a9590f8cad881208 | 1 - 670ecd3219d37592acd3c59a | 1 - 2 files changed, 2 deletions(-) delete mode 100644 670e7f27a9590f8cad881208 delete mode 100644 670ecd3219d37592acd3c59a diff --git a/670e7f27a9590f8cad881208 b/670e7f27a9590f8cad881208 deleted file mode 100644 index 8f4578b94e2..00000000000 --- a/670e7f27a9590f8cad881208 +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 61530 } \ No newline at end of file diff --git a/670ecd3219d37592acd3c59a b/670ecd3219d37592acd3c59a deleted file mode 100644 index c9439950c6e..00000000000 --- a/670ecd3219d37592acd3c59a +++ /dev/null @@ -1 +0,0 @@ -{ "port" : 23000, "pid" : 43126 } \ No newline at end of file From f2572c97d3f786ccc93248d2242671735f73a589 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Wed, 16 Oct 2024 17:35:29 -0400 Subject: [PATCH 124/136] requested changes --- src/client-side-encryption/auto_encrypter.ts | 15 +- .../client_side_encryption.test.ts | 251 ------------- .../client-side-encryption/driver.test.ts | 351 +++++++++++++++++- ...ient_side_operations_timeout.prose.test.ts | 5 +- .../server_description.test.ts | 17 +- .../sessions/sessions.prose.test.ts | 18 +- 6 files changed, 382 insertions(+), 275 deletions(-) delete mode 100644 test/integration/client-side-encryption/client_side_encryption.test.ts diff --git a/src/client-side-encryption/auto_encrypter.ts b/src/client-side-encryption/auto_encrypter.ts index d2ebf61ca79..47c7ff62901 100644 --- a/src/client-side-encryption/auto_encrypter.ts +++ b/src/client-side-encryption/auto_encrypter.ts @@ -395,17 +395,10 @@ export class AutoEncrypter { socketOptions: 
autoSelectSocketOptions(this._client.options) }); - return deserialize( - await stateMachine.execute( - this, - context, - options.timeoutContext?.csotEnabled() ? options.timeoutContext : undefined - ), - { - promoteValues: false, - promoteLongs: false - } - ); + return deserialize(await stateMachine.execute(this, context, options.timeoutContext), { + promoteValues: false, + promoteLongs: false + }); } /** diff --git a/test/integration/client-side-encryption/client_side_encryption.test.ts b/test/integration/client-side-encryption/client_side_encryption.test.ts deleted file mode 100644 index cae35c561e9..00000000000 --- a/test/integration/client-side-encryption/client_side_encryption.test.ts +++ /dev/null @@ -1,251 +0,0 @@ -import { expect } from 'chai'; -import * as sinon from 'sinon'; - -// eslint-disable-next-line @typescript-eslint/no-restricted-imports -import { StateMachine } from '../../../src/client-side-encryption/state_machine'; -import { BSON, Connection, CSOTTimeoutContext, MongoOperationTimeoutError } from '../../mongodb'; -import { type FailPoint, sleep } from '../../tools/utils'; - -describe('Client-Side Encryption (Integration)', function () { - describe('CSOT', function () { - describe('Auto encryption', function () { - let setupClient; - - beforeEach(async function () { - setupClient = this.configuration.newClient(); - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'alwaysOn', - data: { - failCommands: ['aggregate'], - blockConnection: true, - blockTimeMS: 2000 - } - } as FailPoint); - }); - - afterEach(async function () { - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'off' - } as FailPoint); - await setupClient.close(); - }); - - context('when client is provided timeoutMS and command hangs', function () { - let encryptedClient; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'data.datakeys', - kmsProviders: { - local: { key: Buffer.alloc(96) } - } - }, - timeoutMS: 1000 - } - ); - await encryptedClient.connect(); - }); - - afterEach(async function () { - await encryptedClient.close(); - }); - - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const err = await encryptedClient - .db('test') - .collection('test') - .aggregate([]) - .toArray() - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - } - ); - }); - - context('when client is not provided timeoutMS and command hangs', function () { - let encryptedClient; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - local: { key: Buffer.alloc(96) } - } - } - } - ); - }); - - afterEach(async function () { - encryptedClient.close(); - }); - - it('the command should not fail', { requires: { mongodb: '>=4.2.0' } }, async function () { - const err = await encryptedClient - .db('test') - .collection('test') - .aggregate([]) - .toArray() - .catch(e => e); - expect(err).to.deep.equal([]); - }); - }); - }); - - describe('State machine', function () { - const stateMachine = new StateMachine({} as any); - - const timeoutContext = () => { - return new CSOTTimeoutContext({ - timeoutMS: 500, - serverSelectionTimeoutMS: 30000 - }); - }; - - describe('#markCommand', function () { - context('when provided timeoutContext and command hangs', 
function () { - let encryptedClient; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - timeoutMS: 500 - } - ); - await encryptedClient.connect(); - - const stub = sinon - // @ts-expect-error accessing private method - .stub(Connection.prototype, 'sendCommand') - .callsFake(async function* (...args) { - await sleep(1000); - yield* stub.wrappedMethod.call(this, ...args); - }); - }); - - afterEach(async function () { - await encryptedClient?.close(); - sinon.restore(); - }); - - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const err = await stateMachine - .markCommand( - encryptedClient, - 'test.test', - BSON.serialize({ ping: 1 }), - timeoutContext() - ) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - } - ); - }); - }); - - describe('#fetchKeys', function () { - let setupClient; - - beforeEach(async function () { - setupClient = this.configuration.newClient(); - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'alwaysOn', - data: { - failCommands: ['find'], - blockConnection: true, - blockTimeMS: 2000 - } - } as FailPoint); - }); - - afterEach(async function () { - await setupClient - .db() - .admin() - .command({ - configureFailPoint: 'failCommand', - mode: 'off' - } as FailPoint); - await setupClient.close(); - }); - - context('when provided timeoutContext and command hangs', function () { - let encryptedClient; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - timeoutMS: 1000 - } - ); - await encryptedClient.connect(); - }); - - afterEach(async function () { - await encryptedClient?.close(); - }); - - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const err = await stateMachine - .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - } - ); - }); - - context('when not provided timeoutContext and command hangs', function () { - let encryptedClient; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient(); - await encryptedClient.connect(); - }); - - afterEach(async function () { - await encryptedClient?.close(); - }); - - it( - 'the command should fail due to a server error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const err = await stateMachine - .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })) - .catch(e => e); - expect(err).to.deep.equal([]); - } - ); - }); - }); - }); - }); -}); diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index 71c3cbd858d..b3914e068dc 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -1,12 +1,23 @@ import { EJSON, UUID } from 'bson'; import { expect } from 'chai'; import * as crypto from 'crypto'; +import * as sinon from 'sinon'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; -import { type Collection, type CommandStartedEvent, type MongoClient } from '../../mongodb'; +// eslint-disable-next-line @typescript-eslint/no-restricted-imports +import { StateMachine } from 
'../../../src/client-side-encryption/state_machine'; +import { + type Collection, + type CommandStartedEvent, + Connection, + CSOTTimeoutContext, + type KMSProviders, + type MongoClient, + MongoOperationTimeoutError +} from '../../mongodb'; import * as BSON from '../../mongodb'; -import { getEncryptExtraOptions } from '../../tools/utils'; +import { type FailPoint, getEncryptExtraOptions, sleep } from '../../tools/utils'; const metadata = { requires: { @@ -471,3 +482,339 @@ describe('Range Explicit Encryption with JS native types', function () { }); }); }); + +describe('CSOT', function () { + describe('Auto encryption', function () { + let setupClient; + let keyVaultClient: MongoClient; + + beforeEach(async function () { + keyVaultClient = this.configuration.newClient(); + await keyVaultClient.connect(); + await keyVaultClient.db('keyvault').createCollection('datakeys'); + const clientEncryption = new ClientEncryption(keyVaultClient, { + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: getKmsProviders() + }); + await clientEncryption.createDataKey('local'); + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['listCollections'], + blockConnection: true, + blockTimeMS: 2000 + } + } as FailPoint); + }); + + afterEach(async function () { + await keyVaultClient.close(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await setupClient.close(); + }); + + const getKmsProviders = (): KMSProviders => { + const result = EJSON.parse(process.env.CSFLE_KMS_PROVIDERS || '{}') as unknown as { + local: unknown; + }; + return { local: result.local }; + }; + + const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=4.2.0', + clientSideEncryption: '>=6.1.0' + } + }; + + context('when client is provided timeoutMS and command hangs', function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultClient, + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: getKmsProviders() + }, + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient.close(); + }); + + it('the command should fail due to a timeout error', metadata, async function () { + const err = await encryptedClient + .db('test') + .collection('test') + .aggregate([]) + .toArray() + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + context('when client is not provided timeoutMS and command hangs', function () { + let encryptedClient: MongoClient; + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultClient, + keyVaultNamespace: 'admin.datakeys', + kmsProviders: getKmsProviders() + } + } + ); + }); + + afterEach(async function () { + encryptedClient.close(); + }); + + it('the command should not fail', metadata, async function () { + await encryptedClient.db('test').collection('test').aggregate([]).toArray(); + }); + }); + }); + + describe('State machine', function () { + const stateMachine = new StateMachine({} as any); + + const timeoutContext = () => { + return new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + }; + + describe('#markCommand', function () { + context('when provided timeoutContext and command 
hangs', function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS: 500 + } + ); + await encryptedClient.connect(); + + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(1000); + yield* stub.wrappedMethod.call(this, ...args); + }); + }); + + afterEach(async function () { + await encryptedClient?.close(); + sinon.restore(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const err = await stateMachine + .markCommand( + encryptedClient, + 'test.test', + BSON.serialize({ ping: 1 }), + timeoutContext() + ) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + }); + }); + + describe('#fetchKeys', function () { + let setupClient; + + beforeEach(async function () { + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 2000 + } + } as FailPoint); + }); + + afterEach(async function () { + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await setupClient.close(); + }); + + context('when provided timeoutContext and command hangs', function () { + let encryptedClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const err = await stateMachine + .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + }); + + context('when not provided timeoutContext and command hangs', function () { + let encryptedClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient(); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it( + 'the command should not fail due to a server error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + await stateMachine.fetchKeys( + encryptedClient, + 'test.test', + BSON.serialize({ a: 1 }) + ); + } + ); + }); + }); + + describe('#fetchCollectionInfo', function () { + let setupClient; + + beforeEach(async function () { + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['listCollections'], + blockConnection: true, + blockTimeMS: 2000 + } + } as FailPoint); + }); + + afterEach(async function () { + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await setupClient.close(); + }); + + context('when provided timeoutContext and command hangs', function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS: 1000 + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function 
() { + await encryptedClient?.close(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const err = await stateMachine + .fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }, timeoutContext()) + .catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + }); + + context('when not provided timeoutContext and command hangs', function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient(); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it( + 'the command should not fail due to a server error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + await stateMachine.fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }); + } + ); + }); + }); + }); +}); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 0152f8374a2..b058eb6ce4a 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -3,6 +3,8 @@ import { type ChildProcess, spawn } from 'node:child_process'; import { expect } from 'chai'; +import * as os from 'os'; +import * as path from 'path'; import * as semver from 'semver'; import * as sinon from 'sinon'; import { Readable } from 'stream'; @@ -125,9 +127,10 @@ describe('CSOT spec prose tests', function () { let childProcess: ChildProcess; beforeEach(async function () { + const pidFile = path.join(os.tmpdir(), new ObjectId().toHexString()); childProcess = spawn( 'mongocryptd', - ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', new ObjectId().toHexString()], + ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', pidFile], { stdio: 'ignore', detached: true diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts index 0a3c7eecbf6..60aa4614055 100644 --- a/test/integration/server-discovery-and-monitoring/server_description.test.ts +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -1,8 +1,10 @@ import { type ChildProcess, spawn } from 'node:child_process'; import { expect } from 'chai'; +import * as os from 'os'; +import * as path from 'path'; -import { MongoClient } from '../../mongodb'; +import { MongoClient, ObjectId } from '../../mongodb'; describe('class ServerDescription', function () { describe('when connecting to mongocryptd', { requires: { mongodb: '>=4.4' } }, function () { @@ -11,10 +13,15 @@ describe('class ServerDescription', function () { let childProcess: ChildProcess; beforeEach(async function () { - childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { - stdio: 'ignore', - detached: true - }); + const pidFile = path.join(os.tmpdir(), new ObjectId().toHexString()); + childProcess = spawn( + 'mongocryptd', + ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', pidFile], + { + stdio: 'ignore', + detached: true + } + ); childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}`); diff --git 
a/test/integration/sessions/sessions.prose.test.ts b/test/integration/sessions/sessions.prose.test.ts index 8f157c4fa75..82464ffbbdc 100644 --- a/test/integration/sessions/sessions.prose.test.ts +++ b/test/integration/sessions/sessions.prose.test.ts @@ -1,13 +1,16 @@ import { expect } from 'chai'; import { type ChildProcess, spawn } from 'child_process'; import { once } from 'events'; +import * as os from 'os'; +import * as path from 'path'; import { type Collection, type CommandStartedEvent, MongoClient, MongoDriverError, - MongoInvalidArgumentError + MongoInvalidArgumentError, + ObjectId } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -131,10 +134,15 @@ describe('Sessions Prose Tests', () => { let childProcess: ChildProcess; before(() => { - childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { - stdio: 'ignore', - detached: true - }); + const pidFile = path.join(os.tmpdir(), new ObjectId().toHexString()); + childProcess = spawn( + 'mongocryptd', + ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', pidFile], + { + stdio: 'ignore', + detached: true + } + ); childProcess.on('error', err => { console.warn('Sessions prose mongocryptd error:', err); From 0dab0ca716a0c5f0f98b79d4113b39150f9f7ade Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Wed, 16 Oct 2024 17:37:44 -0400 Subject: [PATCH 125/136] lint --- test/integration/client-side-encryption/driver.test.ts | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index b3914e068dc..1c537cbfb59 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -726,11 +726,7 @@ describe('CSOT', function () { 'the command should not fail due to a server error', { requires: { mongodb: '>=4.2.0' } }, async function () { - await stateMachine.fetchKeys( - encryptedClient, - 'test.test', - BSON.serialize({ a: 1 }) - ); + await stateMachine.fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })); } ); }); From 2a6a2d19bfbecd318b6d3e4118ed992da6c6346c Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 17 Oct 2024 11:31:23 -0400 Subject: [PATCH 126/136] requested changes --- .../integration/client-side-encryption/driver.test.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index 1c537cbfb59..fc0aa09b6e9 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -505,7 +505,7 @@ describe('CSOT', function () { configureFailPoint: 'failCommand', mode: 'alwaysOn', data: { - failCommands: ['listCollections'], + failCommands: ['find'], blockConnection: true, blockTimeMS: 2000 } @@ -528,6 +528,7 @@ describe('CSOT', function () { const result = EJSON.parse(process.env.CSFLE_KMS_PROVIDERS || '{}') as unknown as { local: unknown; }; + return { local: result.local }; }; @@ -548,7 +549,10 @@ describe('CSOT', function () { autoEncryption: { keyVaultClient, keyVaultNamespace: 'keyvault.datakeys', - kmsProviders: getKmsProviders() + kmsProviders: getKmsProviders(), + schemaMap: { + 'test.test': {} + } }, timeoutMS: 1000 } @@ -564,8 +568,7 @@ describe('CSOT', function () { const err = await encryptedClient .db('test') .collection('test') - .aggregate([]) - .toArray() + .insertOne({ a: 1 }) 
.catch(e => e); expect(err).to.be.instanceOf(MongoOperationTimeoutError); }); From 953016050f6b4c2e99c5b258d43d6ea6aab9f201 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 17 Oct 2024 11:42:20 -0400 Subject: [PATCH 127/136] drop collection --- test/integration/client-side-encryption/driver.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index fc0aa09b6e9..133cbdd2144 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -513,6 +513,7 @@ describe('CSOT', function () { }); afterEach(async function () { + await keyVaultClient.db('keyvault').collection('datakeys').drop(); await keyVaultClient.close(); await setupClient .db() From 2088e633e1a7a66052fa4cb099e719fd14db4ac0 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 21 Oct 2024 11:07:22 -0400 Subject: [PATCH 128/136] test --- test/integration/client-side-encryption/driver.test.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index 133cbdd2144..08d1ac07d03 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -491,7 +491,7 @@ describe('CSOT', function () { beforeEach(async function () { keyVaultClient = this.configuration.newClient(); await keyVaultClient.connect(); - await keyVaultClient.db('keyvault').createCollection('datakeys'); + await keyVaultClient.db('keyvault').collection('datakeys'); const clientEncryption = new ClientEncryption(keyVaultClient, { keyVaultNamespace: 'keyvault.datakeys', kmsProviders: getKmsProviders() @@ -513,7 +513,6 @@ describe('CSOT', function () { }); afterEach(async function () { - await keyVaultClient.db('keyvault').collection('datakeys').drop(); await keyVaultClient.close(); await setupClient .db() From 313eaa0a77637388ba186ed8d0d1f935b17db744 Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Mon, 14 Oct 2024 09:38:50 -0600 Subject: [PATCH 129/136] feat(NODE-6403): add CSOT support to client bulk write (#4261) Co-authored-by: Warren James --- src/cmap/connection.ts | 2 + src/cmap/wire_protocol/on_data.ts | 1 + src/cursor/abstract_cursor.ts | 2 +- src/cursor/client_bulk_write_cursor.ts | 8 +- src/operations/client_bulk_write/executor.ts | 16 +- src/sdam/server.ts | 2 +- src/utils.ts | 13 + ...ient_side_operations_timeout.prose.test.ts | 29 +- .../node_csot.test.ts | 16 +- .../collection_db_management.test.ts | 4 +- .../crud/client_bulk_write.test.ts | 384 ++++++++++++++++++ test/tools/runner/config.ts | 28 +- test/tools/utils.ts | 67 +++ 13 files changed, 535 insertions(+), 37 deletions(-) create mode 100644 test/integration/crud/client_bulk_write.test.ts diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 9eaae6e81d1..6b1d3c24171 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -721,6 +721,8 @@ export class Connection extends TypedEventEmitter { throw new MongoOperationTimeoutError('Timed out at socket write'); } throw error; + } finally { + timeout.clear(); } } return await drainEvent; diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index 64c636f41f1..f6732618330 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -116,6 +116,7 @@ export function onData( emitter.off('data', 
eventHandler); emitter.off('error', errorHandler); finished = true; + timeoutForSocketRead?.clear(); const doneResult = { value: undefined, done: finished } as const; for (const promise of unconsumedPromises) { diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 255a977a5f9..96d28d05584 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -243,7 +243,7 @@ export abstract class AbstractCursor< options.timeoutMode ?? (options.tailable ? CursorTimeoutMode.ITERATION : CursorTimeoutMode.LIFETIME); } else { - if (options.timeoutMode != null) + if (options.timeoutMode != null && options.timeoutContext == null) throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); } diff --git a/src/cursor/client_bulk_write_cursor.ts b/src/cursor/client_bulk_write_cursor.ts index 69e166effca..d9da82d367b 100644 --- a/src/cursor/client_bulk_write_cursor.ts +++ b/src/cursor/client_bulk_write_cursor.ts @@ -34,7 +34,7 @@ export class ClientBulkWriteCursor extends AbstractCursor { constructor( client: MongoClient, commandBuilder: ClientBulkWriteCommandBuilder, - options: ClientBulkWriteOptions = {} + options: ClientBulkWriteCursorOptions = {} ) { super(client, new MongoDBNamespace('admin', '$cmd'), options); @@ -71,7 +71,11 @@ export class ClientBulkWriteCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, clientBulkWriteOperation); + const response = await executeOperation( + this.client, + clientBulkWriteOperation, + this.timeoutContext + ); this.cursorResponse = response; return { server: clientBulkWriteOperation.server, session, response }; diff --git a/src/operations/client_bulk_write/executor.ts b/src/operations/client_bulk_write/executor.ts index f02b7b6e795..ab7c4404f66 100644 --- a/src/operations/client_bulk_write/executor.ts +++ b/src/operations/client_bulk_write/executor.ts @@ -1,4 +1,5 @@ import { type Document } from '../../bson'; +import { CursorTimeoutContext, CursorTimeoutMode } from '../../cursor/abstract_cursor'; import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor'; import { MongoClientBulkWriteError, @@ -7,6 +8,8 @@ import { MongoServerError } from '../../error'; import { type MongoClient } from '../../mongo_client'; +import { TimeoutContext } from '../../timeout'; +import { resolveTimeoutOptions } from '../../utils'; import { WriteConcern } from '../../write_concern'; import { executeOperation } from '../execute_operation'; import { ClientBulkWriteOperation } from './client_bulk_write'; @@ -86,17 +89,26 @@ export class ClientBulkWriteExecutor { pkFactory ); // Unacknowledged writes need to execute all batches and return { ok: 1} + const resolvedOptions = resolveTimeoutOptions(this.client, this.options); + const context = TimeoutContext.create(resolvedOptions); + if (this.options.writeConcern?.w === 0) { while (commandBuilder.hasNextBatch()) { const operation = new ClientBulkWriteOperation(commandBuilder, this.options); - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, context); } return ClientBulkWriteResultsMerger.unacknowledged(); } else { const resultsMerger = new ClientBulkWriteResultsMerger(this.options); // For each command will will create and exhaust a cursor for the results. 
while (commandBuilder.hasNextBatch()) { - const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options); + const cursorContext = new CursorTimeoutContext(context, Symbol()); + const options = { + ...this.options, + timeoutContext: cursorContext, + ...(resolvedOptions.timeoutMS != null && { timeoutMode: CursorTimeoutMode.LIFETIME }) + }; + const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, options); try { await resultsMerger.merge(cursor); } catch (error) { diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 7ab2d9a043f..35a6f1de695 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -106,7 +106,7 @@ export type ServerEvents = { EventEmitterWithState; /** @internal */ -export type ServerCommandOptions = Omit & { +export type ServerCommandOptions = Omit & { timeoutContext: TimeoutContext; }; diff --git a/src/utils.ts b/src/utils.ts index 0c6477168ca..45aafb8aec5 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -36,6 +36,7 @@ import { ServerType } from './sdam/common'; import type { Server } from './sdam/server'; import type { Topology } from './sdam/topology'; import type { ClientSession } from './sessions'; +import { type TimeoutContextOptions } from './timeout'; import { WriteConcern } from './write_concern'; /** @@ -515,6 +516,18 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { return keys.length > 0 && keys[0][0] === '$'; } +export function resolveTimeoutOptions>( + client: MongoClient, + options: T +): T & + Pick< + MongoClient['s']['options'], + 'timeoutMS' | 'serverSelectionTimeoutMS' | 'waitQueueTimeoutMS' | 'socketTimeoutMS' + > { + const { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS } = + client.s.options; + return { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS, ...options }; +} /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 80da92e10a3..458447a437c 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -21,7 +21,8 @@ import { promiseWithResolvers, squashError } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { type FailPoint, makeMultiBatchWrite } from '../../tools/utils'; +import { filterForCommands } from '../shared'; // TODO(NODE-5824): Implement CSOT prose tests describe('CSOT spec prose tests', function () { @@ -1183,9 +1184,9 @@ describe('CSOT spec prose tests', function () { }); }); - describe.skip( + describe( '11. Multi-batch bulkWrites', - { requires: { mongodb: '>=8.0', serverless: 'forbid' } }, + { requires: { mongodb: '>=8.0', serverless: 'forbid', topology: 'single' } }, function () { /** * ### 11. 
Multi-batch bulkWrites @@ -1245,9 +1246,6 @@ describe('CSOT spec prose tests', function () { } }; - let maxBsonObjectSize: number; - let maxMessageSizeBytes: number; - beforeEach(async function () { await internalClient .db('db') @@ -1256,29 +1254,20 @@ describe('CSOT spec prose tests', function () { .catch(() => null); await internalClient.db('admin').command(failpoint); - const hello = await internalClient.db('admin').command({ hello: 1 }); - maxBsonObjectSize = hello.maxBsonObjectSize; - maxMessageSizeBytes = hello.maxMessageSizeBytes; - client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); }); - it.skip('performs two bulkWrites which fail to complete before 2000 ms', async function () { + it('performs two bulkWrites which fail to complete before 2000 ms', async function () { const writes = []; - client.on('commandStarted', ev => writes.push(ev)); + client.on('commandStarted', filterForCommands('bulkWrite', writes)); - const length = maxMessageSizeBytes / maxBsonObjectSize + 1; - const models = Array.from({ length }, () => ({ - namespace: 'db.coll', - name: 'insertOne' as const, - document: { a: 'b'.repeat(maxBsonObjectSize - 500) } - })); + const models = await makeMultiBatchWrite(this.configuration); const error = await client.bulkWrite(models).catch(error => error); expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError); - expect(writes.map(ev => ev.commandName)).to.deep.equal(['bulkWrite', 'bulkWrite']); - }).skipReason = 'TODO(NODE-6403): client.bulkWrite is implemented in a follow up'; + expect(writes).to.have.lengthOf(2); + }); } ); }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 68d7b16f54d..a981a9113df 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -279,12 +279,16 @@ describe('CSOT driver tests', metadata, () => { .stub(Connection.prototype, 'readMany') .callsFake(async function* (...args) { const realIterator = readManyStub.wrappedMethod.call(this, ...args); - const cmd = commandSpy.lastCall.args.at(1); - if ('giveMeWriteErrors' in cmd) { - await realIterator.next().catch(() => null); // dismiss response - yield { parse: () => writeErrorsReply }; - } else { - yield (await realIterator.next()).value; + try { + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + } finally { + realIterator.return(); } }); }); diff --git a/test/integration/collection-management/collection_db_management.test.ts b/test/integration/collection-management/collection_db_management.test.ts index f5c4c55cf05..0cb90b3b592 100644 --- a/test/integration/collection-management/collection_db_management.test.ts +++ b/test/integration/collection-management/collection_db_management.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { Collection, type Db, type MongoClient } from '../../mongodb'; +import { Collection, type Db, type MongoClient, ObjectId } from '../../mongodb'; describe('Collection Management and Db Management', function () { let client: MongoClient; @@ -16,7 +16,7 @@ describe('Collection Management and Db Management', function () { }); it('returns a collection object after calling createCollection', async function () { - const 
collection = await db.createCollection('collection'); + const collection = await db.createCollection(new ObjectId().toHexString()); expect(collection).to.be.instanceOf(Collection); }); diff --git a/test/integration/crud/client_bulk_write.test.ts b/test/integration/crud/client_bulk_write.test.ts new file mode 100644 index 00000000000..6177077b632 --- /dev/null +++ b/test/integration/crud/client_bulk_write.test.ts @@ -0,0 +1,384 @@ +import { expect } from 'chai'; +import { setTimeout } from 'timers/promises'; + +import { + type CommandStartedEvent, + type Connection, + type ConnectionPool, + type MongoClient, + MongoOperationTimeoutError, + now, + TimeoutContext +} from '../../mongodb'; +import { + clearFailPoint, + configureFailPoint, + makeMultiBatchWrite, + makeMultiResponseBatchModelArray +} from '../../tools/utils'; +import { filterForCommands } from '../shared'; + +const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=8.0', + serverless: 'forbid' + } +}; + +describe('Client Bulk Write', function () { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + await clearFailPoint(this.configuration); + }); + + describe('CSOT enabled', function () { + describe('when timeoutMS is set on the client', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 300 }); + await client.connect(); + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite([ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ]) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on the bulkWrite operation', function () { + beforeEach(async function () { + client = this.configuration.newClient({}); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on both the client and operation options', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('bulk write options take precedence over the client options', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + 
{ timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe( + 'unacknowledged writes', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + function () { + let connection: Connection; + let pool: ConnectionPool; + + beforeEach(async function () { + client = this.configuration.newClient({}, { maxPoolSize: 1, waitQueueTimeoutMS: 2000 }); + + await client.connect(); + + pool = Array.from(client.topology.s.servers.values())[0].pool; + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + }); + + afterEach(async function () { + pool = Array.from(client.topology.s.servers.values())[0].pool; + pool.checkIn(connection); + await client.close(); + }); + + it('a single batch bulk write does not take longer than timeoutMS', async function () { + const start = now(); + let end; + const timeoutError = client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 200, writeConcern: { w: 0 } } + ) + .catch(e => e) + .then(e => { + end = now(); + return e; + }); + + await setTimeout(250); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(200 - 100, 200 + 100); + }); + + it( + 'timeoutMS applies to all batches', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + let end; + const timeoutError = client + .bulkWrite(models, { + timeoutMS: 400, + writeConcern: { w: 0 } + }) + .catch(e => e) + .then(r => { + end = now(); + return r; + }); + + await setTimeout(210); + + pool.checkIn(connection); + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + + await setTimeout(210); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(400 - 100, 400 + 100); + } + ); + } + ); + + describe('acknowledged writes', metadata, function () { + describe('when a bulk write command times out', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('the operation times out', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when the timeout is reached while iterating the result cursor', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true, minPoolSize: 5 }); + client.on('commandStarted', filterForCommands(['getMore'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + 
configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1400, failCommands: ['getMore'] } + }); + }); + + it('the bulk write operation times out', metadata, async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + verboseResults: true, + timeoutMS: 1500 + }) + .catch(e => e); + + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + // DRIVERS-3005 - killCursors causes cursor cleanup to extend past timeoutMS. + // The amount of time killCursors takes is wildly variable and can take up to almost + // 600-700ms sometimes. + expect(end - start).to.be.within(1500, 1500 + 800); + expect(commands).to.have.lengthOf(1); + }); + }); + + describe('if the cursor encounters an error and a killCursors is sent', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands(['killCursors'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + blockConnection: true, + blockTimeMS: 3000, + failCommands: ['getMore', 'killCursors'] + } + }); + }); + + it( + 'timeoutMS is refreshed to the timeoutMS passed to the bulk write for the killCursors command', + metadata, + async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const timeoutError = await client + .bulkWrite(models, { ordered: true, timeoutMS: 2800, verboseResults: true }) + .catch(e => e); + + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + expect(maxTimeMS).to.be.greaterThan(1000); + } + ); + }); + + describe('when the bulk write is executed in multiple batches', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands('bulkWrite', commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { blockConnection: true, blockTimeMS: 1010, failCommands: ['bulkWrite'] } + }); + }); + + it( + 'timeoutMS applies to the duration of all batches', + { + requires: { + ...metadata.requires, + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + timeoutMS: 2000 + }) + .catch(e => e); + + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(2000 - 100, 2000 + 100); + expect(commands.length, 'Test must execute two batches.').to.equal(2); + } + ); + }); + }); + }); +}); diff --git a/test/tools/runner/config.ts b/test/tools/runner/config.ts index 1d637486226..16024638fba 100644 --- a/test/tools/runner/config.ts +++ b/test/tools/runner/config.ts @@ -7,6 +7,7 @@ import { type AuthMechanism, HostAddress, MongoClient, + type MongoClientOptions, type ServerApi, TopologyType, type WriteConcernSettings @@ -82,7 +83,7 @@ export class TestConfiguration { auth?: { username: string; password: string; authSource?: string }; 
proxyURIParams?: ProxyParams; }; - serverApi: ServerApi; + serverApi?: ServerApi; activeResources: number; isSrv: boolean; serverlessCredentials: { username: string | undefined; password: string | undefined }; @@ -171,13 +172,34 @@ export class TestConfiguration { return this.options.replicaSet; } + /** + * Returns a `hello`, executed against `uri`. + */ + async hello(uri = this.uri) { + const client = this.newClient(uri); + try { + await client.connect(); + const { maxBsonObjectSize, maxMessageSizeBytes, maxWriteBatchSize, ...rest } = await client + .db('admin') + .command({ hello: 1 }); + return { + maxBsonObjectSize, + maxMessageSizeBytes, + maxWriteBatchSize, + ...rest + }; + } finally { + await client.close(); + } + } + isOIDC(uri: string, env: string): boolean { if (!uri) return false; return uri.indexOf('MONGODB-OIDC') > -1 && uri.indexOf(`ENVIRONMENT:${env}`) > -1; } - newClient(urlOrQueryOptions?: string | Record, serverOptions?: Record) { - serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); + newClient(urlOrQueryOptions?: string | Record, serverOptions?: MongoClientOptions) { + serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); // Support MongoClient constructor form (url, options) for `newClient`. if (typeof urlOrQueryOptions === 'string') { diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 8614bd7d64c..8ebc5e8f532 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -11,6 +11,7 @@ import { setTimeout } from 'timers'; import { inspect, promisify } from 'util'; import { + type AnyClientBulkWriteModel, type Document, type HostAddress, MongoClient, @@ -18,6 +19,7 @@ import { Topology, type TopologyOptions } from '../mongodb'; +import { type TestConfiguration } from './runner/config'; import { runUnifiedSuite } from './unified-spec-runner/runner'; import { type CollectionData, @@ -598,3 +600,68 @@ export async function waitUntilPoolsFilled( await Promise.all([wait$(), client.connect()]); } + +export async function configureFailPoint(configuration: TestConfiguration, failPoint: FailPoint) { + const utilClient = configuration.newClient(); + await utilClient.connect(); + + try { + await utilClient.db('admin').command(failPoint); + } finally { + await utilClient.close(); + } +} + +export async function clearFailPoint(configuration: TestConfiguration) { + const utilClient = configuration.newClient(); + await utilClient.connect(); + + try { + await utilClient.db('admin').command({ + configureFailPoint: 'failCommand', + mode: 'off' + }); + } finally { + await utilClient.close(); + } +} + +export async function makeMultiBatchWrite( + configuration: TestConfiguration +): Promise { + const { maxBsonObjectSize, maxMessageSizeBytes } = await configuration.hello(); + + const length = maxMessageSizeBytes / maxBsonObjectSize + 1; + const models = Array.from({ length }, () => ({ + namespace: 'db.coll', + name: 'insertOne' as const, + document: { a: 'b'.repeat(maxBsonObjectSize - 500) } + })); + + return models; +} + +export async function makeMultiResponseBatchModelArray( + configuration: TestConfiguration +): Promise { + const { maxBsonObjectSize } = await configuration.hello(); + const namespace = `foo.${new BSON.ObjectId().toHexString()}`; + const models: AnyClientBulkWriteModel[] = [ + { + name: 'updateOne', + namespace, + update: { $set: { age: 1 } }, + upsert: true, + filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) } + }, + { + name: 'updateOne', + namespace, + update: { $set: { age: 1 } }, + upsert: true, + filter: { 
_id: 'b'.repeat(maxBsonObjectSize / 2) } + } + ]; + + return models; +} From 07cffc7eb4ef0d4c3ad475f1f85cbd2072f48203 Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Thu, 17 Oct 2024 13:18:41 -0600 Subject: [PATCH 130/136] chore: fix a few flaky CSOT tests (#4278) --- ...resource-management-feature-integration.sh | 5 +- ...ient_side_operations_timeout.prose.test.ts | 2 +- .../node_csot.test.ts | 47 +++++++++------- .../crud/client_bulk_write.test.ts | 4 +- .../node-specific/abstract_cursor.test.ts | 54 +++++++++++++------ test/tools/utils.ts | 12 +++-- 6 files changed, 82 insertions(+), 42 deletions(-) diff --git a/.evergreen/run-resource-management-feature-integration.sh b/.evergreen/run-resource-management-feature-integration.sh index 093a4749d78..71756d96141 100644 --- a/.evergreen/run-resource-management-feature-integration.sh +++ b/.evergreen/run-resource-management-feature-integration.sh @@ -1,6 +1,9 @@ #! /bin/bash -source $DRIVERS_TOOLS/.evergreen/init-node-and-npm-env.sh +# source $DRIVERgit addS_TOOLS/.evergreen/init-node-and-npm-env.sh + +echo "node: $(node --version)" +echo "npm: $(npm --version)" echo "Building driver..." npm pack diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 458447a437c..146a2585c52 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -343,7 +343,7 @@ describe('CSOT spec prose tests', function () { client = this.configuration.newClient(undefined, { monitorCommands: true, - timeoutMS: 100, + timeoutMS: 150, minPoolSize: 20 }); await client.connect(); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index a981a9113df..12b380d8f1a 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -361,7 +361,7 @@ describe('CSOT driver tests', metadata, () => { describe('Non-Tailable cursors', () => { let client: MongoClient; let internalClient: MongoClient; - let commandStarted: CommandStartedEvent[]; + let commandStarted: (CommandStartedEvent & { command: { maxTimeMS?: number } })[]; let commandSucceeded: CommandSucceededEvent[]; const failpoint: FailPoint = { configureFailPoint: 'failCommand', @@ -369,7 +369,7 @@ describe('CSOT driver tests', metadata, () => { data: { failCommands: ['find', 'getMore'], blockConnection: true, - blockTimeMS: 50 + blockTimeMS: 150 } }; @@ -435,7 +435,7 @@ describe('CSOT driver tests', metadata, () => { const cursor = client .db('db') .collection('coll') - .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 200 }) .project({ _id: 0 }); // Iterating over 3 documents in the collection, each artificially taking ~50 ms due to failpoint. 
If timeoutMS is not refreshed, then we'd expect to error @@ -457,20 +457,25 @@ describe('CSOT driver tests', metadata, () => { const cursor = client .db('db') .collection('coll') - .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 100 }) + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 200 }) .project({ _id: 0 }); await cursor.toArray(); - expect(commandStarted).to.have.length.gte(3); // Find and 2 getMores - expect( - commandStarted.filter(ev => { - return ( - ev.command.find != null && - ev.command.getMore != null && - ev.command.maxTimeMS != null - ); - }) - ).to.have.lengthOf(0); + const commands = commandStarted.filter(c => + ['find', 'getMore'].includes(c.commandName) + ); + expect(commands).to.have.lengthOf(4); // Find and 2 getMores + + const [ + { command: aggregate }, + { command: getMore1 }, + { command: getMore2 }, + { command: getMore3 } + ] = commands; + expect(aggregate).not.to.have.property('maxTimeMS'); + expect(getMore1).not.to.have.property('maxTimeMS'); + expect(getMore2).not.to.have.property('maxTimeMS'); + expect(getMore3).not.to.have.property('maxTimeMS'); } ); }); @@ -644,7 +649,7 @@ describe('CSOT driver tests', metadata, () => { client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize }); commandStarted = []; client.on('commandStarted', ev => commandStarted.push(ev)); - await client.connect(); + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), minPoolSize); }); afterEach(async function () { @@ -685,11 +690,13 @@ describe('CSOT driver tests', metadata, () => { .db('db') .collection('coll') .find({}, { timeoutMS: 150, tailable: true, awaitData: true, batchSize: 1 }); - for (let i = 0; i < 5; i++) { - // Iterate cursor 5 times (server would have blocked for 500ms overall, but client - // should not throw - await cursor.next(); - } + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + await cursor.next(); + await cursor.next(); + await cursor.next(); + await cursor.next(); }); it('does not use timeoutMS to compute maxTimeMS for getMores', metadata, async function () { diff --git a/test/integration/crud/client_bulk_write.test.ts b/test/integration/crud/client_bulk_write.test.ts index 6177077b632..d9b5512b76e 100644 --- a/test/integration/crud/client_bulk_write.test.ts +++ b/test/integration/crud/client_bulk_write.test.ts @@ -320,7 +320,9 @@ describe('Client Bulk Write', function () { it( 'timeoutMS is refreshed to the timeoutMS passed to the bulk write for the killCursors command', - metadata, + { + requires: { ...metadata.requires, topology: '!load-balanced' } + }, async function () { const models = await makeMultiResponseBatchModelArray(this.configuration); const timeoutError = await client diff --git a/test/integration/node-specific/abstract_cursor.test.ts b/test/integration/node-specific/abstract_cursor.test.ts index 136e72a3499..8e154e1dc3e 100644 --- a/test/integration/node-specific/abstract_cursor.test.ts +++ b/test/integration/node-specific/abstract_cursor.test.ts @@ -7,6 +7,7 @@ import { inspect } from 'util'; import { AbstractCursor, type Collection, + type CommandStartedEvent, CursorTimeoutContext, CursorTimeoutMode, type FindCursor, @@ -17,7 +18,8 @@ import { MongoServerError, TimeoutContext } from '../../mongodb'; -import { type FailPoint } from '../../tools/utils'; +import { clearFailPoint, configureFailPoint } from '../../tools/utils'; +import { filterForCommands } from '../shared'; describe('class 
AbstractCursor', function () { describe('regression tests NODE-5372', function () { @@ -405,9 +407,11 @@ describe('class AbstractCursor', function () { let client: MongoClient; let collection: Collection; let context: CursorTimeoutContext; + const commands: CommandStartedEvent[] = []; beforeEach(async function () { - client = this.configuration.newClient(); + client = this.configuration.newClient({}, { monitorCommands: true }); + client.on('commandStarted', filterForCommands('killCursors', commands)); collection = client.db('abstract_cursor_integration').collection('test'); @@ -473,15 +477,17 @@ describe('class AbstractCursor', function () { }); describe('when the cursor refreshes the timeout for killCursors', function () { - it( - 'the provided timeoutContext is not modified', - { - requires: { - mongodb: '>=4.4' - } - }, - async function () { - await client.db('admin').command({ + let uri: string; + + before(function () { + uri = this.configuration.url({ useMultipleMongoses: false }); + }); + + beforeEach(async function () { + commands.length = 0; + await configureFailPoint( + this.configuration, + { configureFailPoint: 'failCommand', mode: { times: 1 }, data: { @@ -489,23 +495,41 @@ describe('class AbstractCursor', function () { blockConnection: true, blockTimeMS: 5000 } - } as FailPoint); + }, + uri + ); + }); + + afterEach(async function () { + await clearFailPoint(this.configuration, uri); + }); + it( + 'the provided timeoutContext is not modified', + { + requires: { + mongodb: '>=4.4', + topology: '!load-balanced' + } + }, + async function () { const cursor = collection.find( {}, { timeoutContext: context, - timeoutMS: 1000, + timeoutMS: 150, timeoutMode: CursorTimeoutMode.LIFETIME, batchSize: 1 } ); + const refresh = sinon.spy(context, 'refresh'); + const refreshed = sinon.spy(context, 'refreshed'); const error = await cursor.toArray().catch(e => e); expect(error).to.be.instanceof(MongoOperationTimeoutError); - // @ts-expect-error We know we have a CSOT timeout context but TS does not. 
- expect(context.timeoutContext.remainingTimeMS).to.be.lessThan(0); + expect(refresh.called).to.be.false; + expect(refreshed.called).to.be.true; } ); }); diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 8ebc5e8f532..38c0da6c092 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -601,8 +601,12 @@ export async function waitUntilPoolsFilled( await Promise.all([wait$(), client.connect()]); } -export async function configureFailPoint(configuration: TestConfiguration, failPoint: FailPoint) { - const utilClient = configuration.newClient(); +export async function configureFailPoint( + configuration: TestConfiguration, + failPoint: FailPoint, + uri = configuration.url() +) { + const utilClient = configuration.newClient(uri); await utilClient.connect(); try { @@ -612,8 +616,8 @@ export async function configureFailPoint(configuration: TestConfiguration, failP } } -export async function clearFailPoint(configuration: TestConfiguration) { - const utilClient = configuration.newClient(); +export async function clearFailPoint(configuration: TestConfiguration, uri = configuration.url()) { + const utilClient = configuration.newClient(uri); await utilClient.connect(); try { From c3f31dae7fd10d2db4a67f1f7c15f785167ef77e Mon Sep 17 00:00:00 2001 From: Bailey Pearson Date: Mon, 21 Oct 2024 05:25:07 -0600 Subject: [PATCH 131/136] feat(NODE-6421): add support for timeoutMS to explain helpers (#4268) --- package-lock.json | 9 +- package.json | 2 +- src/cursor/aggregation_cursor.ts | 40 ++- src/cursor/find_cursor.ts | 45 +++- src/explain.ts | 85 ++++++ src/index.ts | 2 + src/operations/command.ts | 15 +- src/operations/find.ts | 9 +- src/utils.ts | 27 -- test/integration/crud/explain.test.ts | 369 ++++++++++++++++++++++++++ test/tools/runner/config.ts | 20 +- test/tools/utils.ts | 23 +- test/unit/explain.test.ts | 40 ++- test/unit/index.test.ts | 1 + 14 files changed, 625 insertions(+), 62 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2b3a9b897aa..5b662575189 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.2", + "mongodb-legacy": "^6.1.3", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6440,11 +6440,10 @@ } }, "node_modules/mongodb-legacy": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.2.tgz", - "integrity": "sha512-oj+LLtvhhi8XuAQ8dll2BVjrnKxOo/7ylyQu0LsKmzyGcbrvzcyvFUOLC6rPhuJPOvnezh3MZ3/Sk9Tl1jpUpg==", + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.3.tgz", + "integrity": "sha512-XJ2PIbVEHUUF4/SyH00dfeprfeLOdWiHcKq8At+JoEZeTue+IAG39G2ixRwClnI7roPb/46K8IF713v9dgQ8rg==", "dev": true, - "license": "Apache-2.0", "dependencies": { "mongodb": "^6.0.0" }, diff --git a/package.json b/package.json index 0c4c668726a..2cd17a9c08c 100644 --- a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.2", + "mongodb-legacy": "^6.1.3", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/cursor/aggregation_cursor.ts b/src/cursor/aggregation_cursor.ts index 056f28454ce..db7bd20b5fa 100644 --- a/src/cursor/aggregation_cursor.ts +++ b/src/cursor/aggregation_cursor.ts @@ -1,6 +1,12 @@ import type { Document } from '../bson'; import { MongoAPIError } from '../error'; -import type { ExplainCommandOptions, 
ExplainVerbosityLike } from '../explain'; +import { + Explain, + ExplainableCursor, + type ExplainCommandOptions, + type ExplainVerbosityLike, + validateExplainTimeoutOptions +} from '../explain'; import type { MongoClient } from '../mongo_client'; import { AggregateOperation, type AggregateOptions } from '../operations/aggregate'; import { executeOperation } from '../operations/execute_operation'; @@ -8,7 +14,6 @@ import type { ClientSession } from '../sessions'; import type { Sort } from '../sort'; import { mergeOptions, type MongoDBNamespace } from '../utils'; import { - AbstractCursor, type AbstractCursorOptions, CursorTimeoutMode, type InitialCursorResponse @@ -24,7 +29,7 @@ export interface AggregationCursorOptions extends AbstractCursorOptions, Aggrega * or higher stream * @public */ -export class AggregationCursor extends AbstractCursor { +export class AggregationCursor extends ExplainableCursor { public readonly pipeline: Document[]; /** @internal */ private aggregateOptions: AggregateOptions; @@ -65,11 +70,20 @@ export class AggregationCursor extends AbstractCursor { /** @internal */ async _initialize(session: ClientSession): Promise { - const aggregateOperation = new AggregateOperation(this.namespace, this.pipeline, { + const options = { ...this.aggregateOptions, ...this.cursorOptions, session - }); + }; + try { + validateExplainTimeoutOptions(options, Explain.fromOptions(options)); + } catch { + throw new MongoAPIError( + 'timeoutMS cannot be used with explain when explain is specified in aggregateOptions' + ); + } + + const aggregateOperation = new AggregateOperation(this.namespace, this.pipeline, options); const response = await executeOperation(this.client, aggregateOperation, this.timeoutContext); @@ -77,14 +91,26 @@ export class AggregationCursor extends AbstractCursor { } /** Execute the explain for the cursor */ - async explain(verbosity?: ExplainVerbosityLike | ExplainCommandOptions): Promise { + async explain(): Promise; + async explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise; + async explain(options: { timeoutMS?: number }): Promise; + async explain( + verbosity: ExplainVerbosityLike | ExplainCommandOptions, + options: { timeoutMS?: number } + ): Promise; + async explain( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): Promise { + const { explain, timeout } = this.resolveExplainTimeoutOptions(verbosity, options); return ( await executeOperation( this.client, new AggregateOperation(this.namespace, this.pipeline, { ...this.aggregateOptions, // NOTE: order matters here, we may need to refine this ...this.cursorOptions, - explain: verbosity ?? true + ...timeout, + explain: explain ?? 
true }) ) ).shift(this.deserializationOptions); diff --git a/src/cursor/find_cursor.ts b/src/cursor/find_cursor.ts index 96b764dc7ff..469c27628a5 100644 --- a/src/cursor/find_cursor.ts +++ b/src/cursor/find_cursor.ts @@ -1,7 +1,13 @@ import { type Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; -import { MongoInvalidArgumentError, MongoTailableCursorError } from '../error'; -import { type ExplainCommandOptions, type ExplainVerbosityLike } from '../explain'; +import { MongoAPIError, MongoInvalidArgumentError, MongoTailableCursorError } from '../error'; +import { + Explain, + ExplainableCursor, + type ExplainCommandOptions, + type ExplainVerbosityLike, + validateExplainTimeoutOptions +} from '../explain'; import type { MongoClient } from '../mongo_client'; import type { CollationOptions } from '../operations/command'; import { CountOperation, type CountOptions } from '../operations/count'; @@ -11,7 +17,7 @@ import type { Hint } from '../operations/operation'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortDirection } from '../sort'; import { emitWarningOnce, mergeOptions, type MongoDBNamespace, squashError } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { type InitialCursorResponse } from './abstract_cursor'; /** @public Flags allowed for cursor */ export const FLAGS = [ @@ -24,7 +30,7 @@ export const FLAGS = [ ] as const; /** @public */ -export class FindCursor extends AbstractCursor { +export class FindCursor extends ExplainableCursor { /** @internal */ private cursorFilter: Document; /** @internal */ @@ -63,11 +69,21 @@ export class FindCursor extends AbstractCursor { /** @internal */ async _initialize(session: ClientSession): Promise { - const findOperation = new FindOperation(this.namespace, this.cursorFilter, { + const options = { ...this.findOptions, // NOTE: order matters here, we may need to refine this ...this.cursorOptions, session - }); + }; + + try { + validateExplainTimeoutOptions(options, Explain.fromOptions(options)); + } catch { + throw new MongoAPIError( + 'timeoutMS cannot be used with explain when explain is specified in findOptions' + ); + } + + const findOperation = new FindOperation(this.namespace, this.cursorFilter, options); const response = await executeOperation(this.client, findOperation, this.timeoutContext); @@ -133,14 +149,27 @@ export class FindCursor extends AbstractCursor { } /** Execute the explain for the cursor */ - async explain(verbosity?: ExplainVerbosityLike | ExplainCommandOptions): Promise { + async explain(): Promise; + async explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise; + async explain(options: { timeoutMS?: number }): Promise; + async explain( + verbosity: ExplainVerbosityLike | ExplainCommandOptions, + options: { timeoutMS?: number } + ): Promise; + async explain( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): Promise { + const { explain, timeout } = this.resolveExplainTimeoutOptions(verbosity, options); + return ( await executeOperation( this.client, new FindOperation(this.namespace, this.cursorFilter, { ...this.findOptions, // NOTE: order matters here, we may need to refine this ...this.cursorOptions, - explain: verbosity ?? true + ...timeout, + explain: explain ?? 
true }) ) ).shift(this.deserializationOptions); diff --git a/src/explain.ts b/src/explain.ts index 51f591efd47..670bea53041 100644 --- a/src/explain.ts +++ b/src/explain.ts @@ -1,3 +1,7 @@ +import { type Document } from './bson'; +import { AbstractCursor } from './cursor/abstract_cursor'; +import { MongoAPIError } from './error'; + /** @public */ export const ExplainVerbosity = Object.freeze({ queryPlanner: 'queryPlanner', @@ -86,3 +90,84 @@ export class Explain { return new Explain(verbosity, maxTimeMS); } } + +export function validateExplainTimeoutOptions(options: Document, explain?: Explain) { + const { maxTimeMS, timeoutMS } = options; + if (timeoutMS != null && (maxTimeMS != null || explain?.maxTimeMS != null)) { + throw new MongoAPIError('Cannot use maxTimeMS with timeoutMS for explain commands.'); + } +} + +/** + * Applies an explain to a given command. + * @internal + * + * @param command - the command on which to apply the explain + * @param options - the options containing the explain verbosity + */ +export function decorateWithExplain( + command: Document, + explain: Explain +): { + explain: Document; + verbosity: ExplainVerbosity; + maxTimeMS?: number; +} { + type ExplainCommand = ReturnType; + const { verbosity, maxTimeMS } = explain; + const baseCommand: ExplainCommand = { explain: command, verbosity }; + + if (typeof maxTimeMS === 'number') { + baseCommand.maxTimeMS = maxTimeMS; + } + + return baseCommand; +} + +/** + * @public + * + * A base class for any cursors that have `explain()` methods. + */ +export abstract class ExplainableCursor extends AbstractCursor { + /** Execute the explain for the cursor */ + abstract explain(): Promise; + abstract explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise; + abstract explain(options: { timeoutMS?: number }): Promise; + abstract explain( + verbosity: ExplainVerbosityLike | ExplainCommandOptions, + options: { timeoutMS?: number } + ): Promise; + abstract explain( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): Promise; + + protected resolveExplainTimeoutOptions( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): { timeout?: { timeoutMS?: number }; explain?: ExplainVerbosityLike | ExplainCommandOptions } { + let explain: ExplainVerbosityLike | ExplainCommandOptions | undefined; + let timeout: { timeoutMS?: number } | undefined; + + if (verbosity == null && options == null) { + explain = undefined; + timeout = undefined; + } else if (verbosity != null && options == null) { + explain = + typeof verbosity !== 'object' + ? verbosity + : 'verbosity' in verbosity + ? verbosity + : undefined; + + timeout = typeof verbosity === 'object' && 'timeoutMS' in verbosity ? 
verbosity : undefined; + } else { + // @ts-expect-error TS isn't smart enough to determine that if both options are provided, the first is explain options + explain = verbosity; + timeout = options; + } + + return { timeout, explain }; + } +} diff --git a/src/index.ts b/src/index.ts index 419ddc2e692..65f9ec7ccb7 100644 --- a/src/index.ts +++ b/src/index.ts @@ -10,6 +10,7 @@ import { ListCollectionsCursor } from './cursor/list_collections_cursor'; import { ListIndexesCursor } from './cursor/list_indexes_cursor'; import type { RunCommandCursor } from './cursor/run_command_cursor'; import { Db } from './db'; +import { ExplainableCursor } from './explain'; import { GridFSBucket } from './gridfs'; import { GridFSBucketReadStream } from './gridfs/download'; import { GridFSBucketWriteStream } from './gridfs/upload'; @@ -91,6 +92,7 @@ export { ClientSession, Collection, Db, + ExplainableCursor, FindCursor, GridFSBucket, GridFSBucketReadStream, diff --git a/src/operations/command.ts b/src/operations/command.ts index 5bd80f796d1..bcd3919017b 100644 --- a/src/operations/command.ts +++ b/src/operations/command.ts @@ -1,19 +1,19 @@ import type { BSONSerializeOptions, Document } from '../bson'; import { type MongoDBResponseConstructor } from '../cmap/wire_protocol/responses'; import { MongoInvalidArgumentError } from '../error'; -import { Explain, type ExplainOptions } from '../explain'; +import { + decorateWithExplain, + Explain, + type ExplainOptions, + validateExplainTimeoutOptions +} from '../explain'; import { ReadConcern } from '../read_concern'; import type { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection'; import type { ClientSession } from '../sessions'; import { type TimeoutContext } from '../timeout'; -import { - commandSupportsReadConcern, - decorateWithExplain, - maxWireVersion, - MongoDBNamespace -} from '../utils'; +import { commandSupportsReadConcern, maxWireVersion, MongoDBNamespace } from '../utils'; import { WriteConcern, type WriteConcernOptions } from '../write_concern'; import type { ReadConcernLike } from './../read_concern'; import { AbstractOperation, Aspect, type OperationOptions } from './operation'; @@ -97,6 +97,7 @@ export abstract class CommandOperation extends AbstractOperation { if (this.hasAspect(Aspect.EXPLAINABLE)) { this.explain = Explain.fromOptions(options); + validateExplainTimeoutOptions(this.options, this.explain); } else if (options?.explain != null) { throw new MongoInvalidArgumentError(`Option "explain" is not supported on this command`); } diff --git a/src/operations/find.ts b/src/operations/find.ts index 10453d141da..1775ea6e07f 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -2,13 +2,17 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; -import { type ExplainOptions } from '../explain'; +import { + decorateWithExplain, + type ExplainOptions, + validateExplainTimeoutOptions +} from '../explain'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort } from '../sort'; import { type TimeoutContext } from '../timeout'; -import { decorateWithExplain, type MongoDBNamespace, 
normalizeHintField } from '../utils'; +import { type MongoDBNamespace, normalizeHintField } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -119,6 +123,7 @@ export class FindOperation extends CommandOperation { let findCommand = makeFindCommand(this.ns, this.filter, options); if (this.explain) { + validateExplainTimeoutOptions(this.options, this.explain); findCommand = decorateWithExplain(findCommand, this.explain); } diff --git a/src/utils.ts b/src/utils.ts index 45aafb8aec5..e4381908cc5 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -26,7 +26,6 @@ import { MongoParseError, MongoRuntimeError } from './error'; -import type { Explain, ExplainVerbosity } from './explain'; import type { MongoClient } from './mongo_client'; import type { CommandOperationOptions, OperationParent } from './operations/command'; import type { Hint, OperationOptions } from './operations/operation'; @@ -246,32 +245,6 @@ export function decorateWithReadConcern( } } -/** - * Applies an explain to a given command. - * @internal - * - * @param command - the command on which to apply the explain - * @param options - the options containing the explain verbosity - */ -export function decorateWithExplain( - command: Document, - explain: Explain -): { - explain: Document; - verbosity: ExplainVerbosity; - maxTimeMS?: number; -} { - type ExplainCommand = ReturnType; - const { verbosity, maxTimeMS } = explain; - const baseCommand: ExplainCommand = { explain: command, verbosity }; - - if (typeof maxTimeMS === 'number') { - baseCommand.maxTimeMS = maxTimeMS; - } - - return baseCommand; -} - /** * @internal */ diff --git a/test/integration/crud/explain.test.ts b/test/integration/crud/explain.test.ts index 44fe381303a..c7a9a3025f9 100644 --- a/test/integration/crud/explain.test.ts +++ b/test/integration/crud/explain.test.ts @@ -5,9 +5,12 @@ import { type Collection, type CommandStartedEvent, type Db, + type Document, type MongoClient, + MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; +import { clearFailPoint, configureFailPoint, measureDuration } from '../../tools/utils'; import { filterForCommands } from '../shared'; const explain = [true, false, 'queryPlanner', 'allPlansExecution', 'executionStats', 'invalid']; @@ -296,6 +299,372 @@ describe('CRUD API explain option', function () { }; } }); + + describe('explain with timeoutMS', function () { + let client: MongoClient; + type ExplainStartedEvent = CommandStartedEvent & { + command: { explain: Document & { maxTimeMS?: number }; maxTimeMS?: number }; + }; + const commands: ExplainStartedEvent[] = []; + + afterEach(async function () { + await clearFailPoint( + this.configuration, + this.configuration.url({ useMultipleMongoses: false }) + ); + }); + + beforeEach(async function () { + const uri = this.configuration.url({ useMultipleMongoses: false }); + await configureFailPoint( + this.configuration, + { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['explain'], + blockConnection: true, + blockTimeMS: 2000 + } + }, + this.configuration.url({ useMultipleMongoses: false }) + ); + + client = this.configuration.newClient(uri, { monitorCommands: true }); + client.on('commandStarted', filterForCommands('explain', commands)); + await client.connect(); + }); + + afterEach(async function () { + await client?.close(); + commands.length = 0; + }); + + describe('Explain helpers respect timeoutMS', function 
() { + describe('when a cursor api is being explained', function () { + describe('when timeoutMS is provided', function () { + it( + 'the explain command times out after timeoutMS', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const { duration, result } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e) + ); + + expect(result).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(1000 - 100, 1000 + 100); + } + ); + + it( + 'the explain command has the calculated maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const timeout = await cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e); + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + + expect(maxTimeMS).to.be.a('number'); + } + ); + + it( + 'the explained command does not have a maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const timeout = await cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e); + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { + explain: { maxTimeMS } + } + } + ] = commands; + + expect(maxTimeMS).not.to.exist; + } + ); + }); + + describe('when timeoutMS and maxTimeMS are both provided', function () { + it( + 'an error is thrown indicating incompatibility of those options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const error = await cursor + .explain({ verbosity: 'queryPlanner', maxTimeMS: 1000 }) + .catch(e => e); + expect(error).to.match(/Cannot use maxTimeMS with timeoutMS for explain commands/); + } + ); + }); + }); + + describe('when a non-cursor api is being explained', function () { + describe('when timeoutMS is provided', function () { + it( + 'the explain command times out after timeoutMS', + { requires: { mongodb: '>=4.4' } }, + async function () { + const { duration, result } = await measureDuration(() => + client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner' } + } + ) + .catch(e => e) + ); + + expect(result).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(1000 - 100, 1000 + 100); + } + ); + + it( + 'the explain command has the calculated maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const timeout = await client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner' } + } + ) + .catch(e => e); + + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + + expect(maxTimeMS).to.be.a('number'); + } + ); + + it( + 'the explained command does not have a maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const timeout = await client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner' } + } + ) + .catch(e => e); + + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { 
+ explain: { maxTimeMS } + } + } + ] = commands; + + expect(maxTimeMS).not.to.exist; + } + ); + }); + + describe('when timeoutMS and maxTimeMS are both provided', function () { + it( + 'an error is thrown indicating incompatibility of those options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const error = await client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner', maxTimeMS: 1000 } + } + ) + .catch(e => e); + + expect(error).to.match(/Cannot use maxTimeMS with timeoutMS for explain commands/); + } + ); + }); + }); + + describe('when find({}, { explain: ...}) is used with timeoutMS', function () { + it( + 'an error is thrown indicating that explain is not supported with timeoutMS for this API', + { requires: { mongodb: '>=4.4' } }, + async function () { + const error = await client + .db('foo') + .collection('bar') + .find( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner', maxTimeMS: 1000 } + } + ) + .toArray() + .catch(e => e); + + expect(error).to.match( + /timeoutMS cannot be used with explain when explain is specified in findOptions/ + ); + } + ); + }); + + describe('when aggregate({}, { explain: ...}) is used with timeoutMS', function () { + it( + 'an error is thrown indicating that explain is not supported with timeoutMS for this API', + { requires: { mongodb: '>=4.4' } }, + async function () { + const error = await client + .db('foo') + .collection('bar') + .aggregate([], { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner', maxTimeMS: 1000 } + }) + .toArray() + .catch(e => e); + + expect(error).to.match( + /timeoutMS cannot be used with explain when explain is specified in aggregateOptions/ + ); + } + ); + }); + }); + + describe('fluent api timeoutMS precedence and inheritance', function () { + describe('find({}, { timeoutMS }).explain()', function () { + it( + 'respects the timeoutMS from the find options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 800 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('find().explain({}, { timeoutMS })', function () { + it( + 'respects the timeoutMS from the explain helper', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find(); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('find({}, { timeoutMS} ).explain({}, { timeoutMS })', function () { + it( + 'the timeoutMS from the explain helper has precedence', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 100 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('aggregate([], { timeoutMS }).explain()', function () { + it( + 
'respects the timeoutMS from the find options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').aggregate([], { timeoutMS: 800 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('aggregate([], { timeoutMS })', function () { + it( + 'respects the timeoutMS from the explain helper', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').aggregate(); + + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('aggregate([], { timeoutMS} ).explain({}, { timeoutMS })', function () { + it( + 'the timeoutMS from the explain helper has precedence', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').aggregate([], { timeoutMS: 100 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + }); + }); }); function explainValueToExpectation(explainValue: boolean | string) { diff --git a/test/tools/runner/config.ts b/test/tools/runner/config.ts index 16024638fba..af596980c3f 100644 --- a/test/tools/runner/config.ts +++ b/test/tools/runner/config.ts @@ -199,7 +199,7 @@ export class TestConfiguration { } newClient(urlOrQueryOptions?: string | Record, serverOptions?: MongoClientOptions) { - serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); + serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); // Support MongoClient constructor form (url, options) for `newClient`. 
if (typeof urlOrQueryOptions === 'string') { @@ -294,7 +294,23 @@ export class TestConfiguration { * * @param options - overrides and settings for URI generation */ - url(options?: UrlOptions) { + url( + options?: UrlOptions & { + useMultipleMongoses?: boolean; + db?: string; + replicaSet?: string; + proxyURIParams?: ProxyParams; + username?: string; + password?: string; + auth?: { + username?: string; + password?: string; + }; + authSource?: string; + authMechanism?: string; + authMechanismProperties?: Record; + } + ) { options = { db: this.options.db, replicaSet: this.options.replicaSet, diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 38c0da6c092..cd79bb2d4c2 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -15,6 +15,7 @@ import { type Document, type HostAddress, MongoClient, + now, OP_MSG, Topology, type TopologyOptions @@ -616,8 +617,8 @@ export async function configureFailPoint( } } -export async function clearFailPoint(configuration: TestConfiguration, uri = configuration.url()) { - const utilClient = configuration.newClient(uri); +export async function clearFailPoint(configuration: TestConfiguration, url = configuration.url()) { + const utilClient = configuration.newClient(url); await utilClient.connect(); try { @@ -669,3 +670,21 @@ export async function makeMultiResponseBatchModelArray( return models; } + +/** + * A utility to measure the duration of an async function. This is intended to be used for CSOT + * testing, where we expect to timeout within a certain threshold and want to measure the duration + * of that operation. + */ +export async function measureDuration(f: () => Promise): Promise<{ + duration: number; + result: T | Error; +}> { + const start = now(); + const result = await f().catch(e => e); + const end = now(); + return { + duration: end - start, + result + }; +} diff --git a/test/unit/explain.test.ts b/test/unit/explain.test.ts index 8d71197a81a..282a6fe7c8e 100644 --- a/test/unit/explain.test.ts +++ b/test/unit/explain.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai'; import { it } from 'mocha'; -import { Explain, ExplainVerbosity } from '../mongodb'; +import { Explain, ExplainVerbosity, FindCursor, MongoClient, MongoDBNamespace } from '../mongodb'; describe('class Explain {}', function () { describe('static .fromOptions()', function () { @@ -50,4 +50,42 @@ describe('class Explain {}', function () { }); }); }); + + describe('parseTimeoutOptions()', function () { + const cursor = new FindCursor( + new MongoClient('mongodb://localhost:27027'), + MongoDBNamespace.fromString('foo.bar'), + {}, + {} + ); + + it('parseTimeoutOptions()', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions(); + expect(timeout).to.be.undefined; + expect(explain).to.be.undefined; + }); + + it('parseTimeoutOptions()', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions({ timeoutMS: 1_000 }); + expect(timeout).to.deep.equal({ timeoutMS: 1_000 }); + expect(explain).to.be.undefined; + }); + + it('parseTimeoutOptions()', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions({ + verbosity: 'queryPlanner' + }); + expect(timeout).to.be.undefined; + expect(explain).to.deep.equal({ verbosity: 'queryPlanner' }); + }); + + it('parseTimeoutOptions()', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions( + { verbosity: 'queryPlanner' }, + { timeoutMS: 1_000 } + ); + expect(timeout).to.deep.equal({ timeoutMS: 1_000 }); + expect(explain).to.deep.equal({ verbosity: 
'queryPlanner' }); + }); + }); }); diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index a1e8f22e37d..a76aff98d91 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -54,6 +54,7 @@ const EXPECTED_EXPORTS = [ 'Decimal128', 'Double', 'ExplainVerbosity', + 'ExplainableCursor', 'FindCursor', 'GridFSBucket', 'GridFSBucketReadStream', From 5b1164fdc31808a72a89bf03477e0eb005e58153 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Mon, 21 Oct 2024 15:46:52 -0400 Subject: [PATCH 132/136] added schmea map --- .../client-side-encryption/driver.test.ts | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index 08d1ac07d03..af89a6cea92 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -487,6 +487,7 @@ describe('CSOT', function () { describe('Auto encryption', function () { let setupClient; let keyVaultClient: MongoClient; + let dataKey; beforeEach(async function () { keyVaultClient = this.configuration.newClient(); @@ -496,7 +497,7 @@ describe('CSOT', function () { keyVaultNamespace: 'keyvault.datakeys', kmsProviders: getKmsProviders() }); - await clientEncryption.createDataKey('local'); + dataKey = await clientEncryption.createDataKey('local'); setupClient = this.configuration.newClient(); await setupClient .db() @@ -551,7 +552,21 @@ describe('CSOT', function () { keyVaultNamespace: 'keyvault.datakeys', kmsProviders: getKmsProviders(), schemaMap: { - 'test.test': {} + 'test.test': { + bsonType: 'object', + encryptMetadata: { + keyId: [new UUID(dataKey)] + }, + properties: { + a: { + encrypt: { + bsonType: 'int', + algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Random', + keyId: [new UUID(dataKey)] + } + } + } + } } }, timeoutMS: 1000 From aa9a0777f5aa5ba8a1708a370159f03b4864d703 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 22 Oct 2024 15:28:27 -0400 Subject: [PATCH 133/136] requested changes --- .../client-side-encryption/driver.test.ts | 367 ++++++++++-------- ...ient_side_operations_timeout.prose.test.ts | 4 +- ...lient_side_operations_timeout.unit.test.ts | 20 +- 3 files changed, 210 insertions(+), 181 deletions(-) diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index af89a6cea92..df41b35cbd4 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -17,7 +17,7 @@ import { MongoOperationTimeoutError } from '../../mongodb'; import * as BSON from '../../mongodb'; -import { type FailPoint, getEncryptExtraOptions, sleep } from '../../tools/utils'; +import { type FailPoint, getEncryptExtraOptions, measureDuration, sleep } from '../../tools/utils'; const metadata = { requires: { @@ -536,82 +536,92 @@ describe('CSOT', function () { const metadata: MongoDBMetadataUI = { requires: { mongodb: '>=4.2.0', - clientSideEncryption: '>=6.1.0' + clientSideEncryption: true } }; - context('when client is provided timeoutMS and command hangs', function () { - let encryptedClient: MongoClient; + context( + 'when an auto encrypted client is configured with timeoutMS and auto encryption takes longer than timeoutMS', + function () { + let encryptedClient: MongoClient; + const timeoutMS = 1000; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - 
keyVaultClient, - keyVaultNamespace: 'keyvault.datakeys', - kmsProviders: getKmsProviders(), - schemaMap: { - 'test.test': { - bsonType: 'object', - encryptMetadata: { - keyId: [new UUID(dataKey)] - }, - properties: { - a: { - encrypt: { - bsonType: 'int', - algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Random', - keyId: [new UUID(dataKey)] + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultClient, + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: getKmsProviders(), + schemaMap: { + 'test.test': { + bsonType: 'object', + encryptMetadata: { + keyId: [new UUID(dataKey)] + }, + properties: { + a: { + encrypt: { + bsonType: 'int', + algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Random', + keyId: [new UUID(dataKey)] + } } } } } - } - }, - timeoutMS: 1000 - } - ); - await encryptedClient.connect(); - }); + }, + timeoutMS + } + ); + await encryptedClient.connect(); + }); - afterEach(async function () { - await encryptedClient.close(); - }); + afterEach(async function () { + await encryptedClient.close(); + }); - it('the command should fail due to a timeout error', metadata, async function () { - const err = await encryptedClient - .db('test') - .collection('test') - .insertOne({ a: 1 }) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - }); - }); + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + encryptedClient + .db('test') + .collection('test') + .insertOne({ a: 1 }) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); + } + ); - context('when client is not provided timeoutMS and command hangs', function () { - let encryptedClient: MongoClient; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - keyVaultClient, - keyVaultNamespace: 'admin.datakeys', - kmsProviders: getKmsProviders() + context( + 'when an auto encrypted client is not configured with timeoutMS and auto encryption is delayed', + function () { + let encryptedClient: MongoClient; + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultClient, + keyVaultNamespace: 'admin.datakeys', + kmsProviders: getKmsProviders() + } } - } - ); - }); + ); + }); - afterEach(async function () { - encryptedClient.close(); - }); + afterEach(async function () { + await encryptedClient.close(); + }); - it('the command should not fail', metadata, async function () { - await encryptedClient.db('test').collection('test').aggregate([]).toArray(); - }); - }); + it('the command succeeds', metadata, async function () { + await encryptedClient.db('test').collection('test').aggregate([]).toArray(); + }); + } + ); }); describe('State machine', function () { @@ -619,54 +629,62 @@ describe('CSOT', function () { const timeoutContext = () => { return new CSOTTimeoutContext({ - timeoutMS: 500, + timeoutMS: 1000, serverSelectionTimeoutMS: 30000 }); }; - describe('#markCommand', function () { - context('when provided timeoutContext and command hangs', function () { - let encryptedClient: MongoClient; + const timeoutMS = 1000; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - timeoutMS: 500 + describe('#markCommand', function () { + context( + 'when csot is enabled and markCommand() takes longer 
than the remaining timeoutMS', + function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS + } + ); + await encryptedClient.connect(); + + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(1000); + yield* stub.wrappedMethod.call(this, ...args); + }); + }); + + afterEach(async function () { + await encryptedClient?.close(); + sinon.restore(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .markCommand( + encryptedClient, + 'test.test', + BSON.serialize({ ping: 1 }), + timeoutContext() + ) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); } ); - await encryptedClient.connect(); - - const stub = sinon - // @ts-expect-error accessing private method - .stub(Connection.prototype, 'sendCommand') - .callsFake(async function* (...args) { - await sleep(1000); - yield* stub.wrappedMethod.call(this, ...args); - }); - }); - - afterEach(async function () { - await encryptedClient?.close(); - sinon.restore(); - }); - - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const err = await stateMachine - .markCommand( - encryptedClient, - 'test.test', - BSON.serialize({ ping: 1 }), - timeoutContext() - ) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - } - ); - }); + } + ); }); describe('#fetchKeys', function () { @@ -699,36 +717,47 @@ describe('CSOT', function () { await setupClient.close(); }); - context('when provided timeoutContext and command hangs', function () { - let encryptedClient; + context( + 'when csot is enabled and fetchKeys() takes longer than the remaining timeoutMS', + function () { + let encryptedClient; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - timeoutMS: 1000 + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .fetchKeys( + encryptedClient, + 'test.test', + BSON.serialize({ a: 1 }), + timeoutContext() + ) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); } ); - await encryptedClient.connect(); - }); - - afterEach(async function () { - await encryptedClient?.close(); - }); - - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const err = await stateMachine - .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - } - ); - }); + } + ); - context('when not provided timeoutContext and command hangs', function () { + context('when csot is not enabled and fetchKeys() is delayed', function () { let encryptedClient; 
beforeEach(async function () { @@ -740,13 +769,9 @@ describe('CSOT', function () { await encryptedClient?.close(); }); - it( - 'the command should not fail due to a server error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - await stateMachine.fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })); - } - ); + it('the command succeeds', { requires: { mongodb: '>=4.2.0' } }, async function () { + await stateMachine.fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })); + }); }); }); @@ -780,36 +805,42 @@ describe('CSOT', function () { await setupClient.close(); }); - context('when provided timeoutContext and command hangs', function () { - let encryptedClient: MongoClient; + context( + 'when csot is enabled and fetchCollectionInfo() takes longer than the remaining timeoutMS', + function () { + let encryptedClient: MongoClient; - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - timeoutMS: 1000 + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2.0' } }, + async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }, timeoutContext()) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); } ); - await encryptedClient.connect(); - }); - - afterEach(async function () { - await encryptedClient?.close(); - }); - - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const err = await stateMachine - .fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }, timeoutContext()) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); - } - ); - }); + } + ); - context('when not provided timeoutContext and command hangs', function () { + context('when csot is not enabled and fetchCollectionInfo() is delayed', function () { let encryptedClient: MongoClient; beforeEach(async function () { @@ -821,13 +852,9 @@ describe('CSOT', function () { await encryptedClient?.close(); }); - it( - 'the command should not fail due to a server error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - await stateMachine.fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }); - } - ); + it('the command succeeds', { requires: { mongodb: '>=4.2.0' } }, async function () { + await stateMachine.fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }); + }); }); }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 5948ac1a037..c7d5173a50e 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -139,9 +139,7 @@ describe('CSOT spec prose tests', function () { childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}/?timeoutMS=1000`, { - family: 6, - monitorCommands: true, - 
serverSelectionTimeoutMS: 2000 + monitorCommands: true }); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index a7096a71bcf..f9630fcdc6b 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -22,7 +22,7 @@ import { TimeoutContext, Topology } from '../../mongodb'; -import { sleep } from '../../tools/utils'; +import { measureDuration, sleep } from '../../tools/utils'; import { createTimerSandbox } from '../../unit/timer_sandbox'; // TODO(NODE-5824): Implement CSOT prose tests @@ -186,6 +186,7 @@ describe('CSOT spec unit tests', function () { describe('Auto Encryption', function () { context('when provided timeoutMS and command hangs', function () { let encryptedClient; + const timeoutMS = 500; beforeEach(async function () { encryptedClient = this.configuration.newClient( @@ -206,7 +207,7 @@ describe('CSOT spec unit tests', function () { local: { key: Buffer.alloc(96) } } }, - timeoutMS: 500 + timeoutMS } ); await encryptedClient.connect(); @@ -215,7 +216,7 @@ describe('CSOT spec unit tests', function () { // @ts-expect-error accessing private method .stub(Connection.prototype, 'sendCommand') .callsFake(async function* (...args) { - await sleep(1000); + await sleep(timeoutMS * 2); yield* stub.wrappedMethod.call(this, ...args); }); }); @@ -229,11 +230,14 @@ describe('CSOT spec unit tests', function () { 'the command should fail due to a timeout error', { requires: { mongodb: '>=4.2' } }, async function () { - const err = await encryptedClient - .db() - .command({ ping: 1 }) - .catch(e => e); - expect(err).to.be.instanceOf(MongoOperationTimeoutError); + const { duration, result: error } = await measureDuration(() => + encryptedClient + .db() + .command({ ping: 1 }) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); } ); }); From df6bb05c6a31bf7d17279d1fc0495fbee45e3ad8 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Tue, 22 Oct 2024 15:36:17 -0400 Subject: [PATCH 134/136] consolidate meta data --- .../client-side-encryption/driver.test.ts | 120 +++++++++--------- 1 file changed, 57 insertions(+), 63 deletions(-) diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index df41b35cbd4..202501fad22 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -636,6 +636,12 @@ describe('CSOT', function () { const timeoutMS = 1000; + const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=4.2.0' + } + }; + describe('#markCommand', function () { context( 'when csot is enabled and markCommand() takes longer than the remaining timeoutMS', @@ -665,24 +671,20 @@ describe('CSOT', function () { sinon.restore(); }); - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const { duration, result: error } = await measureDuration(() => - stateMachine - .markCommand( - encryptedClient, - 'test.test', - BSON.serialize({ ping: 1 }), - timeoutContext() - ) - .catch(e => e) - ); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 
100); - } - ); + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .markCommand( + encryptedClient, + 'test.test', + BSON.serialize({ ping: 1 }), + timeoutContext() + ) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); } ); }); @@ -736,24 +738,15 @@ describe('CSOT', function () { await encryptedClient?.close(); }); - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const { duration, result: error } = await measureDuration(() => - stateMachine - .fetchKeys( - encryptedClient, - 'test.test', - BSON.serialize({ a: 1 }), - timeoutContext() - ) - .catch(e => e) - ); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); - } - ); + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); } ); @@ -769,7 +762,7 @@ describe('CSOT', function () { await encryptedClient?.close(); }); - it('the command succeeds', { requires: { mongodb: '>=4.2.0' } }, async function () { + it('the command succeeds', metadata, async function () { await stateMachine.fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })); }); }); @@ -807,6 +800,7 @@ describe('CSOT', function () { context( 'when csot is enabled and fetchCollectionInfo() takes longer than the remaining timeoutMS', + metadata, function () { let encryptedClient: MongoClient; @@ -824,38 +818,38 @@ describe('CSOT', function () { await encryptedClient?.close(); }); - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2.0' } }, - async function () { - const { duration, result: error } = await measureDuration(() => - stateMachine - .fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }, timeoutContext()) - .catch(e => e) - ); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); - } - ); + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }, timeoutContext()) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); } ); - context('when csot is not enabled and fetchCollectionInfo() is delayed', function () { - let encryptedClient: MongoClient; + context( + 'when csot is not enabled and fetchCollectionInfo() is delayed', + metadata, + function () { + let encryptedClient: MongoClient; - beforeEach(async function () { - encryptedClient = this.configuration.newClient(); - await encryptedClient.connect(); - }); + beforeEach(async function () { + encryptedClient = this.configuration.newClient(); + await encryptedClient.connect(); + }); - afterEach(async function () { - await encryptedClient?.close(); - }); + afterEach(async function () { + await encryptedClient?.close(); + 
}); - it('the command succeeds', { requires: { mongodb: '>=4.2.0' } }, async function () { - await stateMachine.fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }); - }); - }); + it('the command succeeds', metadata, async function () { + await stateMachine.fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }); + }); + } + ); }); }); }); From fec88873921a25f6f7883fd1d8ddc20abc5494fb Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Wed, 23 Oct 2024 16:37:42 -0400 Subject: [PATCH 135/136] fixed failing test --- .../client_side_operations_timeout.unit.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index f9630fcdc6b..5d9ab07ca67 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -184,7 +184,7 @@ describe('CSOT spec unit tests', function () { }); describe('Auto Encryption', function () { - context('when provided timeoutMS and command hangs', function () { + context('when an auto encrypted client is configured with timeoutMS and the command takes longer than timeoutMS', function () { let encryptedClient; const timeoutMS = 500; @@ -216,7 +216,7 @@ describe('CSOT spec unit tests', function () { // @ts-expect-error accessing private method .stub(Connection.prototype, 'sendCommand') .callsFake(async function* (...args) { - await sleep(timeoutMS * 2); + await sleep(timeoutMS + 50); yield* stub.wrappedMethod.call(this, ...args); }); }); From a0ebd933d7ae33b6f0c480424c9152bacfe95211 Mon Sep 17 00:00:00 2001 From: Aditi Khare Date: Thu, 24 Oct 2024 11:25:21 -0400 Subject: [PATCH 136/136] lint fix --- ...lient_side_operations_timeout.unit.test.ts | 111 +++++++++--------- 1 file changed, 57 insertions(+), 54 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 5d9ab07ca67..90b04e9a3ed 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -184,63 +184,66 @@ describe('CSOT spec unit tests', function () { }); describe('Auto Encryption', function () { - context('when an auto encrypted client is configured with timeoutMS and the command takes longer than timeoutMS', function () { - let encryptedClient; - const timeoutMS = 500; - - beforeEach(async function () { - encryptedClient = this.configuration.newClient( - {}, - { - autoEncryption: { - extraOptions: { - mongocryptdBypassSpawn: true, - mongocryptdURI: 'mongodb://localhost:27017/db?serverSelectionTimeoutMS=1000', - mongocryptdSpawnArgs: [ - '--pidfilepath=bypass-spawning-mongocryptd.pid', - '--port=27017' - ] + context( + 'when an auto encrypted client is configured with timeoutMS and the command takes longer than timeoutMS', + function () { + let encryptedClient; + const timeoutMS = 500; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + extraOptions: { + mongocryptdBypassSpawn: true, + mongocryptdURI: 'mongodb://localhost:27017/db?serverSelectionTimeoutMS=1000', + mongocryptdSpawnArgs: [ + 
'--pidfilepath=bypass-spawning-mongocryptd.pid', + '--port=27017' + ] + }, + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } }, - keyVaultNamespace: 'admin.datakeys', - kmsProviders: { - aws: { accessKeyId: 'example', secretAccessKey: 'example' }, - local: { key: Buffer.alloc(96) } - } - }, - timeoutMS - } - ); - await encryptedClient.connect(); - - const stub = sinon - // @ts-expect-error accessing private method - .stub(Connection.prototype, 'sendCommand') - .callsFake(async function* (...args) { - await sleep(timeoutMS + 50); - yield* stub.wrappedMethod.call(this, ...args); - }); - }); + timeoutMS + } + ); + await encryptedClient.connect(); + + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(timeoutMS + 50); + yield* stub.wrappedMethod.call(this, ...args); + }); + }); - afterEach(async function () { - await encryptedClient?.close(); - sinon.restore(); - }); + afterEach(async function () { + await encryptedClient?.close(); + sinon.restore(); + }); - it( - 'the command should fail due to a timeout error', - { requires: { mongodb: '>=4.2' } }, - async function () { - const { duration, result: error } = await measureDuration(() => - encryptedClient - .db() - .command({ ping: 1 }) - .catch(e => e) - ); - expect(error).to.be.instanceOf(MongoOperationTimeoutError); - expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); - } - ); - }); + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2' } }, + async function () { + const { duration, result: error } = await measureDuration(() => + encryptedClient + .db() + .command({ ping: 1 }) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + } + ); + } + ); }); });
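Taken together, the explain changes in PATCH 131 let a cursor's explain call carry its own timeout. A minimal usage sketch, assuming a driver build that contains these changes, a reachable deployment at a placeholder URI, and illustrative database/collection/filter names:

import { MongoClient, MongoOperationTimeoutError } from 'mongodb';

async function main() {
  const client = new MongoClient('mongodb://localhost:27017');
  const collection = client.db('test').collection('docs');

  // Verbosity-only call: behavior is unchanged from earlier releases.
  const plan = await collection.find({ a: 1 }).explain('queryPlanner');
  console.log(Object.keys(plan));

  // New overload: verbosity plus a per-explain timeout. Per the integration
  // tests above, the driver attaches the calculated maxTimeMS to the explain
  // command itself and leaves the nested (explained) command without one.
  try {
    await collection.find({ a: 1 }).explain({ verbosity: 'executionStats' }, { timeoutMS: 800 });
  } catch (error) {
    if (!(error instanceof MongoOperationTimeoutError)) throw error;
  }

  // Passing maxTimeMS together with timeoutMS on an explain is rejected with
  // "Cannot use maxTimeMS with timeoutMS for explain commands."

  await client.close();
}

main().catch(console.error);

When both the cursor options and the explain helper supply timeoutMS, the helper's value takes precedence, matching the fluent-api precedence tests in explain.test.ts.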
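The auto-encryption coverage in PATCHES 132-136 exercises the same timeout budget end to end. A hedged configuration sketch, assuming mongodb-client-encryption is installed and using placeholder values for the URI, key vault namespace, local key, and the 1000 ms budget:

import { MongoClient } from 'mongodb';

// With timeoutMS set on the client, the key vault find (fetchKeys), the
// listCollections lookup (fetchCollectionInfo), and markCommand all draw on
// the operation's timeoutMS budget, so a stalled mongocryptd or key vault
// surfaces as a MongoOperationTimeoutError on the encrypted operation.
const encryptedClient = new MongoClient('mongodb://localhost:27017', {
  timeoutMS: 1000,
  autoEncryption: {
    keyVaultNamespace: 'keyvault.datakeys',
    kmsProviders: { local: { key: Buffer.alloc(96) } }
  }
});

// encryptedClient.connect() followed by an insert would then fail within
// roughly timeoutMS instead of hanging when encryption metadata cannot be fetched.

Omitting timeoutMS preserves the old behavior: the state-machine helpers run without a deadline, as the "command succeeds" cases above verify.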