diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts
index 9eaae6e81d1..6b1d3c24171 100644
--- a/src/cmap/connection.ts
+++ b/src/cmap/connection.ts
@@ -721,6 +721,8 @@ export class Connection extends TypedEventEmitter<ConnectionEvents> {
           throw new MongoOperationTimeoutError('Timed out at socket write');
         }
         throw error;
+      } finally {
+        timeout.clear();
       }
     }
     return await drainEvent;
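
The `finally` block added here guarantees the socket-write timeout is cleared on both the success and error paths. A minimal standalone sketch of the pattern (this `Timeout` is a stand-in, not the driver's internal class):

```ts
// Minimal sketch of the cleanup pattern above; stand-in Timeout, not the driver's.
class Timeout {
  private timer: NodeJS.Timeout;
  promise: Promise<never>;

  constructor(ms: number, message: string) {
    let reject!: (err: Error) => void;
    this.promise = new Promise<never>((_, rej) => (reject = rej));
    this.timer = setTimeout(() => reject(new Error(message)), ms);
  }

  clear(): void {
    clearTimeout(this.timer);
  }
}

async function writeWithTimeout(write: Promise<void>, ms: number): Promise<void> {
  const timeout = new Timeout(ms, 'Timed out at socket write');
  try {
    await Promise.race([write, timeout.promise]);
  } finally {
    // The fix above: clear the timer whether the write resolved or threw,
    // so no pending rejection or open handle outlives the operation.
    timeout.clear();
  }
}
```
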
diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts
index 64c636f41f1..f6732618330 100644
--- a/src/cmap/wire_protocol/on_data.ts
+++ b/src/cmap/wire_protocol/on_data.ts
@@ -116,6 +116,7 @@ export function onData(
     emitter.off('data', eventHandler);
     emitter.off('error', errorHandler);
     finished = true;
+    timeoutForSocketRead?.clear();
     const doneResult = { value: undefined, done: finished } as const;
 
     for (const promise of unconsumedPromises) {
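
The same cleanup discipline applies to the socket-read path: when the `onData` iterator is torn down, any pending per-read timeout must be cleared along with the event listeners. A sketch assuming only that the optional timeout exposes `clear()`:

```ts
import { EventEmitter } from 'events';

// Sketch of the onData teardown; `timeoutForSocketRead` stands in for the
// driver's optional per-read Timeout.
function listen(emitter: EventEmitter, timeoutForSocketRead?: { clear(): void }) {
  const eventHandler = (chunk: Buffer) => void chunk;
  const errorHandler = (error: Error) => void error;
  emitter.on('data', eventHandler);
  emitter.on('error', errorHandler);

  return function close(): void {
    emitter.off('data', eventHandler);
    emitter.off('error', errorHandler);
    // The one-line fix above: a read timeout scheduled for this stream must be
    // cleared when the iterator finishes, or it fires against a dead iterator.
    timeoutForSocketRead?.clear();
  };
}
```
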
diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts
index 255a977a5f9..96d28d05584 100644
--- a/src/cursor/abstract_cursor.ts
+++ b/src/cursor/abstract_cursor.ts
@@ -243,7 +243,7 @@ export abstract class AbstractCursor<
         options.timeoutMode ??
         (options.tailable ? CursorTimeoutMode.ITERATION : CursorTimeoutMode.LIFETIME);
     } else {
-      if (options.timeoutMode != null)
+      if (options.timeoutMode != null && options.timeoutContext == null)
         throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS');
     }
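
The relaxed check lets a caller that owns an external `timeoutContext` (such as the bulk-write executor below) set `timeoutMode` even though `timeoutMS` is absent, since the context already carries the deadline. A simplified sketch of the rule:

```ts
// Sketch of the relaxed validation; `CursorOptions` is a simplified stand-in
// for the driver's internal options type.
interface CursorOptions {
  timeoutMS?: number;
  timeoutMode?: 'iteration' | 'lifetime';
  timeoutContext?: object; // an externally owned timeout context
}

function validateTimeoutOptions(options: CursorOptions): void {
  if (options.timeoutMS != null) return;
  // Previously any timeoutMode without timeoutMS threw. Now a caller supplying
  // its own timeoutContext may also set timeoutMode.
  if (options.timeoutMode != null && options.timeoutContext == null) {
    throw new Error('Cannot set timeoutMode without setting timeoutMS');
  }
}
```
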
 
diff --git a/src/cursor/client_bulk_write_cursor.ts b/src/cursor/client_bulk_write_cursor.ts
index 69e166effca..d9da82d367b 100644
--- a/src/cursor/client_bulk_write_cursor.ts
+++ b/src/cursor/client_bulk_write_cursor.ts
@@ -34,7 +34,7 @@ export class ClientBulkWriteCursor extends AbstractCursor {
   constructor(
     client: MongoClient,
     commandBuilder: ClientBulkWriteCommandBuilder,
-    options: ClientBulkWriteOptions = {}
+    options: ClientBulkWriteCursorOptions = {}
   ) {
     super(client, new MongoDBNamespace('admin', '$cmd'), options);
 
@@ -71,7 +71,11 @@ export class ClientBulkWriteCursor extends AbstractCursor {
       session
     });
 
-    const response = await executeOperation(this.client, clientBulkWriteOperation);
+    const response = await executeOperation(
+      this.client,
+      clientBulkWriteOperation,
+      this.timeoutContext
+    );
     this.cursorResponse = response;
 
     return { server: clientBulkWriteOperation.server, session, response };
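
Forwarding `this.timeoutContext` means the initial `bulkWrite` command and any subsequent `getMore`s draw down a single deadline instead of each restarting the clock. A conceptual sketch of that behavior (not the driver's `TimeoutContext` API):

```ts
// Conceptual sketch: one shared deadline consumed by every round trip.
class Deadline {
  private readonly endsAt: number;
  constructor(ms: number) {
    this.endsAt = Date.now() + ms;
  }
  get remainingMS(): number {
    return this.endsAt - Date.now();
  }
}

async function exhaustCursor(
  fetchBatch: (remainingMS: number) => Promise<boolean>, // true while batches remain
  timeoutMS: number
): Promise<void> {
  const deadline = new Deadline(timeoutMS); // created once, shared by every command
  while (deadline.remainingMS > 0) {
    if (!(await fetchBatch(deadline.remainingMS))) return;
  }
  throw new Error('Timed out while iterating the cursor');
}
```
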
diff --git a/src/operations/client_bulk_write/executor.ts b/src/operations/client_bulk_write/executor.ts
index f02b7b6e795..ab7c4404f66 100644
--- a/src/operations/client_bulk_write/executor.ts
+++ b/src/operations/client_bulk_write/executor.ts
@@ -1,4 +1,5 @@
 import { type Document } from '../../bson';
+import { CursorTimeoutContext, CursorTimeoutMode } from '../../cursor/abstract_cursor';
 import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
 import {
   MongoClientBulkWriteError,
@@ -7,6 +8,8 @@ import {
   MongoServerError
 } from '../../error';
 import { type MongoClient } from '../../mongo_client';
+import { TimeoutContext } from '../../timeout';
+import { resolveTimeoutOptions } from '../../utils';
 import { WriteConcern } from '../../write_concern';
 import { executeOperation } from '../execute_operation';
 import { ClientBulkWriteOperation } from './client_bulk_write';
@@ -86,17 +89,26 @@ export class ClientBulkWriteExecutor {
       pkFactory
     );
     // Unacknowledged writes need to execute all batches and return { ok: 1 }
+    const resolvedOptions = resolveTimeoutOptions(this.client, this.options);
+    const context = TimeoutContext.create(resolvedOptions);
+
     if (this.options.writeConcern?.w === 0) {
       while (commandBuilder.hasNextBatch()) {
         const operation = new ClientBulkWriteOperation(commandBuilder, this.options);
-        await executeOperation(this.client, operation);
+        await executeOperation(this.client, operation, context);
       }
       return ClientBulkWriteResultsMerger.unacknowledged();
     } else {
       const resultsMerger = new ClientBulkWriteResultsMerger(this.options);
       // For each command we will create and exhaust a cursor for the results.
       while (commandBuilder.hasNextBatch()) {
-        const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options);
+        const cursorContext = new CursorTimeoutContext(context, Symbol());
+        const options = {
+          ...this.options,
+          timeoutContext: cursorContext,
+          ...(resolvedOptions.timeoutMS != null && { timeoutMode: CursorTimeoutMode.LIFETIME })
+        };
+        const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, options);
         try {
           await resultsMerger.merge(cursor);
         } catch (error) {
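
Each batch cursor now receives a `CursorTimeoutContext` derived from one shared parent context, so all batches consume a single `timeoutMS` budget while each cursor owns (and can clean up) only its own scope. A standalone sketch of that shape (not the driver's classes):

```ts
// Parent/child timeout scopes: every child draws on the same parent budget,
// and a unique owner symbol marks which cursor may close its own scope.
class ParentTimeout {
  private readonly endsAt: number;
  constructor(timeoutMS: number) {
    this.endsAt = Date.now() + timeoutMS;
  }
  get remainingMS(): number {
    return this.endsAt - Date.now();
  }
}

class ChildTimeout {
  constructor(
    private readonly parent: ParentTimeout,
    readonly owner: symbol // identifies the cursor that owns this scope
  ) {}
  get remainingMS(): number {
    return this.parent.remainingMS;
  }
}

async function runAllBatches(batches: Array<() => Promise<void>>, timeoutMS: number) {
  const parent = new ParentTimeout(timeoutMS); // one budget for every batch
  for (const batch of batches) {
    const scope = new ChildTimeout(parent, Symbol('cursor'));
    if (scope.remainingMS <= 0) throw new Error('Timed out across batches');
    await batch();
  }
}
```
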
diff --git a/src/sdam/server.ts b/src/sdam/server.ts
index 7ab2d9a043f..35a6f1de695 100644
--- a/src/sdam/server.ts
+++ b/src/sdam/server.ts
@@ -106,7 +106,7 @@ export type ServerEvents = {
   EventEmitterWithState;
 
 /** @internal */
-export type ServerCommandOptions = Omit<CommandOptions, 'timeoutContext'> & {
+export type ServerCommandOptions = Omit<CommandOptions, 'timeoutContext' | 'socketTimeoutMS'> & {
   timeoutContext: TimeoutContext;
 };
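
Dropping `socketTimeoutMS` from `ServerCommandOptions` makes the compiler enforce that per-command deadlines flow exclusively through the required `timeoutContext`. A type-level sketch with simplified stand-in types:

```ts
// Simplified stand-ins for the driver's option types.
type CommandOptions = {
  socketTimeoutMS?: number;
  timeoutContext?: unknown;
  readPreference?: string;
};

type ServerCommandOptions = Omit<CommandOptions, 'timeoutContext' | 'socketTimeoutMS'> & {
  timeoutContext: { remainingMS: number }; // required, not optional
};

const ok: ServerCommandOptions = { timeoutContext: { remainingMS: 500 } };

// @ts-expect-error socketTimeoutMS is no longer accepted at this layer
const rejected: ServerCommandOptions = { socketTimeoutMS: 500, timeoutContext: { remainingMS: 1 } };
```
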
 
diff --git a/src/utils.ts b/src/utils.ts
index 0c6477168ca..45aafb8aec5 100644
--- a/src/utils.ts
+++ b/src/utils.ts
@@ -36,6 +36,7 @@ import { ServerType } from './sdam/common';
 import type { Server } from './sdam/server';
 import type { Topology } from './sdam/topology';
 import type { ClientSession } from './sessions';
+import { type TimeoutContextOptions } from './timeout';
 import { WriteConcern } from './write_concern';
 
 /**
@@ -515,6 +516,18 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean {
   return keys.length > 0 && keys[0][0] === '$';
 }
 
+export function resolveTimeoutOptions<T extends Partial<TimeoutContextOptions>>(
+  client: MongoClient,
+  options: T
+): T &
+  Pick<
+    MongoClient['s']['options'],
+    'timeoutMS' | 'serverSelectionTimeoutMS' | 'waitQueueTimeoutMS' | 'socketTimeoutMS'
+  > {
+  const { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS } =
+    client.s.options;
+  return { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS, ...options };
+}
 /**
  * Merge inherited properties from parent into options, prioritizing values from options,
  * then values from parent.
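
`resolveTimeoutOptions` seeds an operation's timeout settings from the client and lets explicit options win, because the caller's options are spread last. Hypothetical usage (the `declare` stands in for the internal import):

```ts
import { MongoClient } from 'mongodb';

// Stand-in declaration; the real helper lives in the driver's internal utils.
declare function resolveTimeoutOptions<T>(client: MongoClient, options: T): T & Record<string, unknown>;

const client = new MongoClient('mongodb://localhost:27017', { timeoutMS: 1500 });

// The per-operation value wins because options are spread last:
const resolved = resolveTimeoutOptions(client, { timeoutMS: 300 });
// resolved.timeoutMS === 300, while serverSelectionTimeoutMS, waitQueueTimeoutMS
// and socketTimeoutMS fall back to the values on client.s.options.
```
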
diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts
index 80da92e10a3..458447a437c 100644
--- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts
+++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts
@@ -21,7 +21,8 @@ import {
   promiseWithResolvers,
   squashError
 } from '../../mongodb';
-import { type FailPoint } from '../../tools/utils';
+import { type FailPoint, makeMultiBatchWrite } from '../../tools/utils';
+import { filterForCommands } from '../shared';
 
 // TODO(NODE-5824): Implement CSOT prose tests
 describe('CSOT spec prose tests', function () {
@@ -1183,9 +1184,9 @@ describe('CSOT spec prose tests', function () {
     });
   });
 
-  describe.skip(
+  describe(
     '11. Multi-batch bulkWrites',
-    { requires: { mongodb: '>=8.0', serverless: 'forbid' } },
+    { requires: { mongodb: '>=8.0', serverless: 'forbid', topology: 'single' } },
     function () {
       /**
        * ### 11. Multi-batch bulkWrites
@@ -1245,9 +1246,6 @@ describe('CSOT spec prose tests', function () {
         }
       };
 
-      let maxBsonObjectSize: number;
-      let maxMessageSizeBytes: number;
-
       beforeEach(async function () {
         await internalClient
           .db('db')
@@ -1256,29 +1254,20 @@ describe('CSOT spec prose tests', function () {
           .catch(() => null);
         await internalClient.db('admin').command(failpoint);
 
-        const hello = await internalClient.db('admin').command({ hello: 1 });
-        maxBsonObjectSize = hello.maxBsonObjectSize;
-        maxMessageSizeBytes = hello.maxMessageSizeBytes;
-
         client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true });
       });
 
-      it.skip('performs two bulkWrites which fail to complete before 2000 ms', async function () {
+      it('performs two bulkWrites which fail to complete before 2000 ms', async function () {
         const writes = [];
-        client.on('commandStarted', ev => writes.push(ev));
+        client.on('commandStarted', filterForCommands('bulkWrite', writes));
 
-        const length = maxMessageSizeBytes / maxBsonObjectSize + 1;
-        const models = Array.from({ length }, () => ({
-          namespace: 'db.coll',
-          name: 'insertOne' as const,
-          document: { a: 'b'.repeat(maxBsonObjectSize - 500) }
-        }));
+        const models = await makeMultiBatchWrite(this.configuration);
 
         const error = await client.bulkWrite(models).catch(error => error);
 
         expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError);
-        expect(writes.map(ev => ev.commandName)).to.deep.equal(['bulkWrite', 'bulkWrite']);
-      }).skipReason = 'TODO(NODE-6403): client.bulkWrite is implemented in a follow up';
+        expect(writes).to.have.lengthOf(2);
+      });
     }
   );
 });
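
The test now collects only `bulkWrite` events via `filterForCommands` and builds its oversized models with the shared `makeMultiBatchWrite` helper. A sketch of `filterForCommands` as inferred from its usage here (the real implementation lives in `test/integration/shared`):

```ts
import { type CommandStartedEvent } from 'mongodb';

// Returns a listener that collects only matching commandStarted events.
function filterForCommands(
  name: string | string[],
  events: CommandStartedEvent[]
): (event: CommandStartedEvent) => void {
  const names = Array.isArray(name) ? name : [name];
  return event => {
    if (names.includes(event.commandName)) events.push(event);
  };
}

// As used in the test: client.on('commandStarted', filterForCommands('bulkWrite', writes));
```
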
diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts
index 68d7b16f54d..a981a9113df 100644
--- a/test/integration/client-side-operations-timeout/node_csot.test.ts
+++ b/test/integration/client-side-operations-timeout/node_csot.test.ts
@@ -279,12 +279,16 @@ describe('CSOT driver tests', metadata, () => {
           .stub(Connection.prototype, 'readMany')
           .callsFake(async function* (...args) {
             const realIterator = readManyStub.wrappedMethod.call(this, ...args);
-            const cmd = commandSpy.lastCall.args.at(1);
-            if ('giveMeWriteErrors' in cmd) {
-              await realIterator.next().catch(() => null); // dismiss response
-              yield { parse: () => writeErrorsReply };
-            } else {
-              yield (await realIterator.next()).value;
+            try {
+              const cmd = commandSpy.lastCall.args.at(1);
+              if ('giveMeWriteErrors' in cmd) {
+                await realIterator.next().catch(() => null); // dismiss response
+                yield { parse: () => writeErrorsReply };
+              } else {
+                yield (await realIterator.next()).value;
+              }
+            } finally {
+              realIterator.return();
             }
           });
       });
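
Wrapping the stub body in `try`/`finally` ensures the real iterator is closed even when the outer generator exits early, instead of leaking it. The same pattern in isolation:

```ts
// A wrapping async generator must close the wrapped iterator in `finally`,
// otherwise early termination of the outer generator leaks the inner one
// (and whatever socket state it holds).
async function* takeFirst<T>(inner: AsyncGenerator<T>): AsyncGenerator<T> {
  try {
    const { value, done } = await inner.next();
    if (!done) yield value;
  } finally {
    await inner.return(undefined); // runs even if the consumer stops early
  }
}
```
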
diff --git a/test/integration/collection-management/collection_db_management.test.ts b/test/integration/collection-management/collection_db_management.test.ts
index f5c4c55cf05..0cb90b3b592 100644
--- a/test/integration/collection-management/collection_db_management.test.ts
+++ b/test/integration/collection-management/collection_db_management.test.ts
@@ -1,6 +1,6 @@
 import { expect } from 'chai';
 
-import { Collection, type Db, type MongoClient } from '../../mongodb';
+import { Collection, type Db, type MongoClient, ObjectId } from '../../mongodb';
 
 describe('Collection Management and Db Management', function () {
   let client: MongoClient;
@@ -16,7 +16,7 @@ describe('Collection Management and Db Management', function () {
   });
 
   it('returns a collection object after calling createCollection', async function () {
-    const collection = await db.createCollection('collection');
+    const collection = await db.createCollection(new ObjectId().toHexString());
     expect(collection).to.be.instanceOf(Collection);
   });
 
diff --git a/test/integration/crud/client_bulk_write.test.ts b/test/integration/crud/client_bulk_write.test.ts
new file mode 100644
index 00000000000..6177077b632
--- /dev/null
+++ b/test/integration/crud/client_bulk_write.test.ts
@@ -0,0 +1,384 @@
+import { expect } from 'chai';
+import { setTimeout } from 'timers/promises';
+
+import {
+  type CommandStartedEvent,
+  type Connection,
+  type ConnectionPool,
+  type MongoClient,
+  MongoOperationTimeoutError,
+  now,
+  TimeoutContext
+} from '../../mongodb';
+import {
+  clearFailPoint,
+  configureFailPoint,
+  makeMultiBatchWrite,
+  makeMultiResponseBatchModelArray
+} from '../../tools/utils';
+import { filterForCommands } from '../shared';
+
+const metadata: MongoDBMetadataUI = {
+  requires: {
+    mongodb: '>=8.0',
+    serverless: 'forbid'
+  }
+};
+
+describe('Client Bulk Write', function () {
+  let client: MongoClient;
+
+  afterEach(async function () {
+    await client?.close();
+    await clearFailPoint(this.configuration);
+  });
+
+  describe('CSOT enabled', function () {
+    describe('when timeoutMS is set on the client', function () {
+      beforeEach(async function () {
+        client = this.configuration.newClient({}, { timeoutMS: 300 });
+        await client.connect();
+        await configureFailPoint(this.configuration, {
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] }
+        });
+      });
+
+      it('timeoutMS is used as the timeout for the bulk write', metadata, async function () {
+        const start = now();
+        const timeoutError = await client
+          .bulkWrite([
+            {
+              name: 'insertOne',
+              namespace: 'foo.bar',
+              document: { age: 10 }
+            }
+          ])
+          .catch(e => e);
+        const end = now();
+        expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+        expect(end - start).to.be.within(300 - 100, 300 + 100);
+      });
+    });
+
+    describe('when timeoutMS is set on the bulkWrite operation', function () {
+      beforeEach(async function () {
+        client = this.configuration.newClient({});
+
+        await client.connect();
+
+        await configureFailPoint(this.configuration, {
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] }
+        });
+      });
+
+      it('timeoutMS is used as the timeout for the bulk write', metadata, async function () {
+        const start = now();
+        const timeoutError = await client
+          .bulkWrite(
+            [
+              {
+                name: 'insertOne',
+                namespace: 'foo.bar',
+                document: { age: 10 }
+              }
+            ],
+            { timeoutMS: 300 }
+          )
+          .catch(e => e);
+        const end = now();
+        expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+        expect(end - start).to.be.within(300 - 100, 300 + 100);
+      });
+    });
+
+    describe('when timeoutMS is set on both the client and operation options', function () {
+      beforeEach(async function () {
+        client = this.configuration.newClient({}, { timeoutMS: 1500 });
+
+        await client.connect();
+
+        await configureFailPoint(this.configuration, {
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] }
+        });
+      });
+
+      it('bulk write options take precedence over the client options', metadata, async function () {
+        const start = now();
+        const timeoutError = await client
+          .bulkWrite(
+            [
+              {
+                name: 'insertOne',
+                namespace: 'foo.bar',
+                document: { age: 10 }
+              }
+            ],
+            { timeoutMS: 300 }
+          )
+          .catch(e => e);
+        const end = now();
+        expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+        expect(end - start).to.be.within(300 - 100, 300 + 100);
+      });
+    });
+
+    describe(
+      'unacknowledged writes',
+      {
+        requires: {
+          mongodb: '>=8.0',
+          topology: 'single'
+        }
+      },
+      function () {
+        let connection: Connection;
+        let pool: ConnectionPool;
+
+        beforeEach(async function () {
+          client = this.configuration.newClient({}, { maxPoolSize: 1, waitQueueTimeoutMS: 2000 });
+
+          await client.connect();
+
+          pool = Array.from(client.topology.s.servers.values())[0].pool;
+          connection = await pool.checkOut({
+            timeoutContext: TimeoutContext.create({
+              serverSelectionTimeoutMS: 30000,
+              waitQueueTimeoutMS: 1000
+            })
+          });
+        });
+
+        afterEach(async function () {
+          pool = Array.from(client.topology.s.servers.values())[0].pool;
+          pool.checkIn(connection);
+          await client.close();
+        });
+
+        it('a single batch bulk write does not take longer than timeoutMS', async function () {
+          const start = now();
+          let end;
+          const timeoutError = client
+            .bulkWrite(
+              [
+                {
+                  name: 'insertOne',
+                  namespace: 'foo.bar',
+                  document: { age: 10 }
+                }
+              ],
+              { timeoutMS: 200, writeConcern: { w: 0 } }
+            )
+            .catch(e => e)
+            .then(e => {
+              end = now();
+              return e;
+            });
+
+          await setTimeout(250);
+
+          expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+          expect(end - start).to.be.within(200 - 100, 200 + 100);
+        });
+
+        it(
+          'timeoutMS applies to all batches',
+          {
+            requires: {
+              mongodb: '>=8.0',
+              topology: 'single'
+            }
+          },
+          async function () {
+            const models = await makeMultiBatchWrite(this.configuration);
+            const start = now();
+            let end;
+            const timeoutError = client
+              .bulkWrite(models, {
+                timeoutMS: 400,
+                writeConcern: { w: 0 }
+              })
+              .catch(e => e)
+              .then(r => {
+                end = now();
+                return r;
+              });
+
+            await setTimeout(210);
+
+            pool.checkIn(connection);
+            connection = await pool.checkOut({
+              timeoutContext: TimeoutContext.create({
+                serverSelectionTimeoutMS: 30000,
+                waitQueueTimeoutMS: 1000
+              })
+            });
+
+            await setTimeout(210);
+
+            expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+            expect(end - start).to.be.within(400 - 100, 400 + 100);
+          }
+        );
+      }
+    );
+
+    describe('acknowledged writes', metadata, function () {
+      describe('when a bulk write command times out', function () {
+        beforeEach(async function () {
+          client = this.configuration.newClient({}, { timeoutMS: 1500 });
+
+          await client.connect();
+
+          await configureFailPoint(this.configuration, {
+            configureFailPoint: 'failCommand',
+            mode: { times: 1 },
+            data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] }
+          });
+        });
+
+        it('the operation times out', metadata, async function () {
+          const start = now();
+          const timeoutError = await client
+            .bulkWrite(
+              [
+                {
+                  name: 'insertOne',
+                  namespace: 'foo.bar',
+                  document: { age: 10 }
+                }
+              ],
+              { timeoutMS: 300 }
+            )
+            .catch(e => e);
+          const end = now();
+          expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+          expect(end - start).to.be.within(300 - 100, 300 + 100);
+        });
+      });
+
+      describe('when the timeout is reached while iterating the result cursor', function () {
+        const commands: CommandStartedEvent[] = [];
+
+        beforeEach(async function () {
+          client = this.configuration.newClient({}, { monitorCommands: true, minPoolSize: 5 });
+          client.on('commandStarted', filterForCommands(['getMore'], commands));
+          await client.connect();
+
+          await configureFailPoint(this.configuration, {
+            configureFailPoint: 'failCommand',
+            mode: { times: 1 },
+            data: { blockConnection: true, blockTimeMS: 1400, failCommands: ['getMore'] }
+          });
+        });
+
+        it('the bulk write operation times out', metadata, async function () {
+          const models = await makeMultiResponseBatchModelArray(this.configuration);
+          const start = now();
+          const timeoutError = await client
+            .bulkWrite(models, {
+              verboseResults: true,
+              timeoutMS: 1500
+            })
+            .catch(e => e);
+
+          const end = now();
+          expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+
+          // DRIVERS-3005 - killCursors causes cursor cleanup to extend past timeoutMS.
+          // The time killCursors takes varies widely and can sometimes approach
+          // 600-700ms.
+          expect(end - start).to.be.within(1500, 1500 + 800);
+          expect(commands).to.have.lengthOf(1);
+        });
+      });
+
+      describe('if the cursor encounters an error and a killCursors is sent', function () {
+        const commands: CommandStartedEvent[] = [];
+
+        beforeEach(async function () {
+          client = this.configuration.newClient({}, { monitorCommands: true });
+
+          client.on('commandStarted', filterForCommands(['killCursors'], commands));
+          await client.connect();
+
+          await configureFailPoint(this.configuration, {
+            configureFailPoint: 'failCommand',
+            mode: { times: 2 },
+            data: {
+              blockConnection: true,
+              blockTimeMS: 3000,
+              failCommands: ['getMore', 'killCursors']
+            }
+          });
+        });
+
+        it(
+          'timeoutMS is refreshed to the timeoutMS passed to the bulk write for the killCursors command',
+          metadata,
+          async function () {
+            const models = await makeMultiResponseBatchModelArray(this.configuration);
+            const timeoutError = await client
+              .bulkWrite(models, { ordered: true, timeoutMS: 2800, verboseResults: true })
+              .catch(e => e);
+
+            expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+
+            const [
+              {
+                command: { maxTimeMS }
+              }
+            ] = commands;
+            expect(maxTimeMS).to.be.greaterThan(1000);
+          }
+        );
+      });
+
+      describe('when the bulk write is executed in multiple batches', function () {
+        const commands: CommandStartedEvent[] = [];
+
+        beforeEach(async function () {
+          client = this.configuration.newClient({}, { monitorCommands: true });
+
+          client.on('commandStarted', filterForCommands('bulkWrite', commands));
+          await client.connect();
+
+          await configureFailPoint(this.configuration, {
+            configureFailPoint: 'failCommand',
+            mode: { times: 2 },
+            data: { blockConnection: true, blockTimeMS: 1010, failCommands: ['bulkWrite'] }
+          });
+        });
+
+        it(
+          'timeoutMS applies to the duration of all batches',
+          {
+            requires: {
+              ...metadata.requires,
+              topology: 'single'
+            }
+          },
+          async function () {
+            const models = await makeMultiBatchWrite(this.configuration);
+            const start = now();
+            const timeoutError = await client
+              .bulkWrite(models, {
+                timeoutMS: 2000
+              })
+              .catch(e => e);
+
+            const end = now();
+            expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError);
+            expect(end - start).to.be.within(2000 - 100, 2000 + 100);
+            expect(commands.length, 'Test must execute two batches.').to.equal(2);
+          }
+        );
+      });
+    });
+  });
+});
diff --git a/test/tools/runner/config.ts b/test/tools/runner/config.ts
index 1d637486226..16024638fba 100644
--- a/test/tools/runner/config.ts
+++ b/test/tools/runner/config.ts
@@ -7,6 +7,7 @@ import {
   type AuthMechanism,
   HostAddress,
   MongoClient,
+  type MongoClientOptions,
   type ServerApi,
   TopologyType,
   type WriteConcernSettings
@@ -82,7 +83,7 @@ export class TestConfiguration {
     auth?: { username: string; password: string; authSource?: string };
     proxyURIParams?: ProxyParams;
   };
-  serverApi: ServerApi;
+  serverApi?: ServerApi;
   activeResources: number;
   isSrv: boolean;
   serverlessCredentials: { username: string | undefined; password: string | undefined };
@@ -171,13 +172,34 @@ export class TestConfiguration {
     return this.options.replicaSet;
   }
 
+  /**
+   * Returns the response of a `hello` command, executed against `uri`.
+   */
+  async hello(uri = this.uri) {
+    const client = this.newClient(uri);
+    try {
+      await client.connect();
+      const { maxBsonObjectSize, maxMessageSizeBytes, maxWriteBatchSize, ...rest } = await client
+        .db('admin')
+        .command({ hello: 1 });
+      return {
+        maxBsonObjectSize,
+        maxMessageSizeBytes,
+        maxWriteBatchSize,
+        ...rest
+      };
+    } finally {
+      await client.close();
+    }
+  }
+
   isOIDC(uri: string, env: string): boolean {
     if (!uri) return false;
     return uri.indexOf('MONGODB-OIDC') > -1 && uri.indexOf(`ENVIRONMENT:${env}`) > -1;
   }
 
-  newClient(urlOrQueryOptions?: string | Record<string, any>, serverOptions?: Record<string, any>) {
-    serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions);
+  newClient(urlOrQueryOptions?: string | Record<string, any>, serverOptions?: MongoClientOptions) {
+    serverOptions = Object.assign(<MongoClientOptions>{}, getEnvironmentalOptions(), serverOptions);
 
     // Support MongoClient constructor form (url, options) for `newClient`.
     if (typeof urlOrQueryOptions === 'string') {
diff --git a/test/tools/utils.ts b/test/tools/utils.ts
index 8614bd7d64c..8ebc5e8f532 100644
--- a/test/tools/utils.ts
+++ b/test/tools/utils.ts
@@ -11,6 +11,7 @@ import { setTimeout } from 'timers';
 import { inspect, promisify } from 'util';
 
 import {
+  type AnyClientBulkWriteModel,
   type Document,
   type HostAddress,
   MongoClient,
@@ -18,6 +19,7 @@ import {
   Topology,
   type TopologyOptions
 } from '../mongodb';
+import { type TestConfiguration } from './runner/config';
 import { runUnifiedSuite } from './unified-spec-runner/runner';
 import {
   type CollectionData,
@@ -598,3 +600,68 @@ export async function waitUntilPoolsFilled(
 
   await Promise.all([wait$(), client.connect()]);
 }
+
+export async function configureFailPoint(configuration: TestConfiguration, failPoint: FailPoint) {
+  const utilClient = configuration.newClient();
+  await utilClient.connect();
+
+  try {
+    await utilClient.db('admin').command(failPoint);
+  } finally {
+    await utilClient.close();
+  }
+}
+
+export async function clearFailPoint(configuration: TestConfiguration) {
+  const utilClient = configuration.newClient();
+  await utilClient.connect();
+
+  try {
+    await utilClient.db('admin').command(<FailPoint>{
+      configureFailPoint: 'failCommand',
+      mode: 'off'
+    });
+  } finally {
+    await utilClient.close();
+  }
+}
+
+export async function makeMultiBatchWrite(
+  configuration: TestConfiguration
+): Promise<AnyClientBulkWriteModel[]> {
+  const { maxBsonObjectSize, maxMessageSizeBytes } = await configuration.hello();
+
+  const length = maxMessageSizeBytes / maxBsonObjectSize + 1;
+  const models = Array.from({ length }, () => ({
+    namespace: 'db.coll',
+    name: 'insertOne' as const,
+    document: { a: 'b'.repeat(maxBsonObjectSize - 500) }
+  }));
+
+  return models;
+}
+
+export async function makeMultiResponseBatchModelArray(
+  configuration: TestConfiguration
+): Promise<AnyClientBulkWriteModel[]> {
+  const { maxBsonObjectSize } = await configuration.hello();
+  const namespace = `foo.${new BSON.ObjectId().toHexString()}`;
+  const models: AnyClientBulkWriteModel[] = [
+    {
+      name: 'updateOne',
+      namespace,
+      update: { $set: { age: 1 } },
+      upsert: true,
+      filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) }
+    },
+    {
+      name: 'updateOne',
+      namespace,
+      update: { $set: { age: 1 } },
+      upsert: true,
+      filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) }
+    }
+  ];
+
+  return models;
+}
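
Together these helpers make failpoint setup, teardown, and oversized-model construction reusable across suites. A hypothetical test skeleton combining them, mirroring the integration tests above (`this.configuration` is the suite's `TestConfiguration`):

```ts
import { expect } from 'chai';

import { MongoOperationTimeoutError } from '../mongodb';
import { clearFailPoint, configureFailPoint, makeMultiBatchWrite } from './utils';

describe('multi-batch bulkWrite timeouts', function () {
  beforeEach(async function () {
    // Block both batches long enough that the shared budget is exceeded.
    await configureFailPoint(this.configuration, {
      configureFailPoint: 'failCommand',
      mode: { times: 2 },
      data: { blockConnection: true, blockTimeMS: 1010, failCommands: ['bulkWrite'] }
    });
  });

  afterEach(async function () {
    await clearFailPoint(this.configuration);
  });

  it('applies timeoutMS across both batches', async function () {
    const client = this.configuration.newClient({}, { timeoutMS: 2000 });
    const models = await makeMultiBatchWrite(this.configuration);
    const error = await client.bulkWrite(models).catch(e => e);
    expect(error).to.be.instanceOf(MongoOperationTimeoutError);
    await client.close();
  });
});
```
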