diff --git a/sdk/cosmosdb/cosmos/.vscode/launch.json b/sdk/cosmosdb/cosmos/.vscode/launch.json new file mode 100644 index 000000000000..79bae02a276a --- /dev/null +++ b/sdk/cosmosdb/cosmos/.vscode/launch.json @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Test: Current Open File", + "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha", + "type": "node", + "args": [ + "--timeout", + "100000", + "-r", + "test/mocha.env.ts", + "-r", + "ts-node/register", + "-r", + "esm", + "-r", + "dotenv/config", + "-r", + "./test/public/common/setup.ts", + "--colors", + "**/${fileBasenameNoExtension}.ts" + ], + "internalConsoleOptions": "openOnSessionStart", + "request": "launch", + "skipFiles": [ + "/**" + ], + }, + { + "name": "Test: Selected Test Case", + "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha", + "type": "node", + "args": [ + "--timeout", + "100000", + "-r", + "test/mocha.env.ts", + "-r", + "ts-node/register", + "-r", + "esm", + "-r", + "dotenv/config", + "-r", + "./test/public/common/setup.ts", + "--colors", + "**/${fileBasenameNoExtension}.ts", + "-g", + "${selectedText}" + ], + "internalConsoleOptions": "openOnSessionStart", + "request": "launch", + "skipFiles": [ + "/**" + ], + } + ] +} \ No newline at end of file diff --git a/sdk/cosmosdb/cosmos/review/cosmos.api.md b/sdk/cosmosdb/cosmos/review/cosmos.api.md index 4ddd35a31aa9..353568f536c1 100644 --- a/sdk/cosmosdb/cosmos/review/cosmos.api.md +++ b/sdk/cosmosdb/cosmos/review/cosmos.api.md @@ -90,7 +90,7 @@ export class ClientContext { batch({ body, path, partitionKey, resourceId, options, }: { body: T; path: string; - partitionKey: PartitionKey; + partitionKey: string; resourceId: string; options?: RequestOptions; }): Promise>; @@ -585,7 +585,7 @@ export interface CreateOperationInput { // (undocumented) operationType: typeof BulkOperationType.Create; // (undocumented) - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; // (undocumented) resourceBody: JSONObject; } @@ -695,7 +695,7 @@ export interface DeleteOperationInput { // (undocumented) operationType: typeof BulkOperationType.Delete; // (undocumented) - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; } // @public (undocumented) @@ -735,10 +735,8 @@ export type ExistingKeyOperation = { path: string; }; -// Warning: (ae-forgotten-export) The symbol "PartitionKeyInternal" needs to be exported by the entry point index.d.ts -// -// @public -export function extractPartitionKey(document: unknown, partitionKeyDefinition?: PartitionKeyDefinition): PartitionKeyInternal | undefined; +// @public (undocumented) +export function extractPartitionKey(document: unknown, partitionKeyDefinition: PartitionKeyDefinition): PartitionKey[]; // @public export interface FeedOptions extends SharedOptions { @@ -755,7 +753,7 @@ export interface FeedOptions extends SharedOptions { forceQueryPlan?: boolean; maxDegreeOfParallelism?: number; maxItemCount?: number; - partitionKey?: PartitionKey; + partitionKey?: any; populateQueryMetrics?: boolean; useIncrementalFeed?: boolean; } @@ -873,7 +871,7 @@ export enum IndexKind { // @public export class Item { - constructor(container: Container, id: string, clientContext: ClientContext, partitionKey?: PartitionKey); + constructor(container: Container, id: string, partitionKey: PartitionKey, clientContext: ClientContext); // (undocumented) 
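Reviewer note: a minimal sketch of how the reshaped public surface above is exercised. The endpoint/key constants and the database/container names are placeholders, not part of this patch; only the call shapes come from the API review changes.

```ts
import { CosmosClient, OperationInput, BulkOperationType } from "@azure/cosmos";

// Placeholder credentials -- assumed to come from the environment in real use.
const client = new CosmosClient({ endpoint: "https://localhost:8081", key: "<key>" });
const container = client.database("db1").container("coll1");

async function demo(): Promise<void> {
  // Container.item(id, partitionKeyValue) is unchanged for callers; internally
  // it now constructs new Item(container, id, partitionKey, clientContext).
  const { resource } = await container.item("item1", "A").read();
  console.log(resource?.id);

  // Bulk operation inputs accept the widened scalar/object partitionKey union.
  const op: OperationInput = {
    operationType: BulkOperationType.Read,
    id: "item1",
    partitionKey: "A",
  };
  await container.items.bulk([op]);
}
```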
readonly container: Container; delete(options?: RequestOptions): Promise>; @@ -903,7 +901,7 @@ export class ItemResponse extends ResourceResponse>; + batch(operations: OperationInput[], partitionKey?: string, options?: RequestOptions): Promise>; bulk(operations: OperationInput[], bulkOptions?: BulkOptions, options?: RequestOptions): Promise; changeFeed(partitionKey: string | number | boolean, changeFeedOptions?: ChangeFeedOptions): ChangeFeedIterator; changeFeed(changeFeedOptions?: ChangeFeedOptions): ChangeFeedIterator; @@ -1079,20 +1077,15 @@ export interface PartitionedQueryExecutionInfo { queryRanges: QueryRange[]; } -// Warning: (ae-forgotten-export) The symbol "PrimitivePartitionKeyValue" needs to be exported by the entry point index.d.ts -// // @public (undocumented) -export type PartitionKey = PrimitivePartitionKeyValue | PrimitivePartitionKeyValue[]; +export type PartitionKey = PartitionKeyDefinition | string | number | unknown; // @public (undocumented) export interface PartitionKeyDefinition { - // Warning: (ae-forgotten-export) The symbol "PartitionKeyKind" needs to be exported by the entry point index.d.ts - kind?: PartitionKeyKind; paths: string[]; // (undocumented) systemKey?: boolean; - // Warning: (ae-forgotten-export) The symbol "PartitionKeyDefinitionVersion" needs to be exported by the entry point index.d.ts - version?: PartitionKeyDefinitionVersion; + version?: number; } // @public (undocumented) @@ -1137,7 +1130,7 @@ export interface PatchOperationInput { // (undocumented) operationType: typeof BulkOperationType.Patch; // (undocumented) - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; // (undocumented) resourceBody: PatchRequestBody; } @@ -1391,7 +1384,7 @@ export interface ReadOperationInput { // (undocumented) operationType: typeof BulkOperationType.Read; // (undocumented) - partitionKey?: PartitionKey; + partitionKey?: string | number | boolean | null | Record | undefined; } // @public (undocumented) @@ -1417,7 +1410,7 @@ export interface ReplaceOperationInput { // (undocumented) operationType: typeof BulkOperationType.Replace; // (undocumented) - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; // (undocumented) resourceBody: JSONObject; } @@ -2024,7 +2017,7 @@ export interface UpsertOperationInput { // (undocumented) operationType: typeof BulkOperationType.Upsert; // (undocumented) - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; // (undocumented) resourceBody: JSONObject; } diff --git a/sdk/cosmosdb/cosmos/src/ClientContext.ts b/sdk/cosmosdb/cosmos/src/ClientContext.ts index 9206c1cd3f39..f72c13940d2b 100644 --- a/sdk/cosmosdb/cosmos/src/ClientContext.ts +++ b/sdk/cosmosdb/cosmos/src/ClientContext.ts @@ -13,13 +13,7 @@ import { Constants, HTTPMethod, OperationType, ResourceType } from "./common/con import { getIdFromLink, getPathFromLink, parseLink } from "./common/helper"; import { StatusCodes, SubStatusCodes } from "./common/statusCodes"; import { Agent, CosmosClientOptions } from "./CosmosClientOptions"; -import { - ConnectionPolicy, - ConsistencyLevel, - DatabaseAccount, - PartitionKey, - convertToInternalPartitionKey, -} from "./documents"; +import { ConnectionPolicy, ConsistencyLevel, DatabaseAccount, PartitionKey } from "./documents"; import { GlobalEndpointManager } from "./globalEndpointManager"; import { PluginConfig, PluginOn, executePlugins } from "./plugins/Plugin"; import { FetchFunctionCallback, SqlQuerySpec } from 
"./queryExecutionContext"; @@ -606,7 +600,7 @@ export class ClientContext { }: { body: T; path: string; - partitionKey: PartitionKey; + partitionKey: string; resourceId: string; options?: RequestOptions; }): Promise> { @@ -763,16 +757,12 @@ export class ClientContext { options: requestContext.options, partitionKeyRangeId: requestContext.partitionKeyRangeId, useMultipleWriteLocations: this.connectionPolicy.useMultipleWriteLocations, - partitionKey: - requestContext.partitionKey !== undefined - ? convertToInternalPartitionKey(requestContext.partitionKey) - : undefined, // TODO: Move this check from here to PartitionKey + partitionKey: requestContext.partitionKey, }); } /** - * Returns collection of properties which are derived from the context for Request Creation. - * These properties have client wide scope, as opposed to request specific scope. + * Returns collection of properties which are derived from the context for Request Creation * @returns */ private getContextDerivedPropsForRequestCreation(): { diff --git a/sdk/cosmosdb/cosmos/src/client/Container/Container.ts b/sdk/cosmosdb/cosmos/src/client/Container/Container.ts index df57a0c25af2..ef671abd2031 100644 --- a/sdk/cosmosdb/cosmos/src/client/Container/Container.ts +++ b/sdk/cosmosdb/cosmos/src/client/Container/Container.ts @@ -107,7 +107,7 @@ export class Container { * `const {body: replacedItem} = await container.item("", "").replace({id: "", title: "Updated post", authorID: 5});` */ public item(id: string, partitionKeyValue?: PartitionKey): Item { - return new Item(this, id, this.clientContext, partitionKeyValue); + return new Item(this, id, partitionKeyValue, this.clientContext); } /** diff --git a/sdk/cosmosdb/cosmos/src/client/Item/Item.ts b/sdk/cosmosdb/cosmos/src/client/Item/Item.ts index 10255933e6d9..d402f3687cc1 100644 --- a/sdk/cosmosdb/cosmos/src/client/Item/Item.ts +++ b/sdk/cosmosdb/cosmos/src/client/Item/Item.ts @@ -9,7 +9,7 @@ import { ResourceType, StatusCodes, } from "../../common"; -import { PartitionKey, PartitionKeyInternal, convertToInternalPartitionKey } from "../../documents"; +import { PartitionKey } from "../../documents"; import { extractPartitionKey, undefinedPartitionKey } from "../../extractPartitionKey"; import { RequestOptions, Response } from "../../request"; import { PatchRequestBody } from "../../utils/patch"; @@ -24,7 +24,7 @@ import { ItemResponse } from "./ItemResponse"; * @see {@link Items} for operations on all items; see `container.items`. */ export class Item { - private partitionKey: PartitionKeyInternal; + private partitionKey: PartitionKey; /** * Returns a reference URL to the resource. Used for linking in Permissions. */ @@ -41,11 +41,10 @@ export class Item { constructor( public readonly container: Container, public readonly id: string, - private readonly clientContext: ClientContext, - partitionKey?: PartitionKey + partitionKey: PartitionKey, + private readonly clientContext: ClientContext ) { - this.partitionKey = - partitionKey === undefined ? 
undefined : convertToInternalPartitionKey(partitionKey); + this.partitionKey = partitionKey; } /** diff --git a/sdk/cosmosdb/cosmos/src/client/Item/Items.ts b/sdk/cosmosdb/cosmos/src/client/Item/Items.ts index bed14a38188b..371e40f7a8e1 100644 --- a/sdk/cosmosdb/cosmos/src/client/Item/Items.ts +++ b/sdk/cosmosdb/cosmos/src/client/Item/Items.ts @@ -17,15 +17,16 @@ import { ItemResponse } from "./ItemResponse"; import { Batch, isKeyInRange, - prepareOperations, + Operation, + getPartitionKeyToHash, + decorateOperation, OperationResponse, OperationInput, BulkOptions, decorateBatchOperation, } from "../../utils/batch"; -import { assertNotUndefined } from "../../utils/typeChecks"; -import { hashPartitionKey } from "../../utils/hashing/hash"; -import { PartitionKey, PartitionKeyDefinition } from "../../documents"; +import { hashV1PartitionKey } from "../../utils/hashing/v1"; +import { hashV2PartitionKey } from "../../utils/hashing/v2"; /** * @hidden @@ -287,8 +288,8 @@ export class Items { const ref = new Item( this.container, (response.result as any).id, - this.clientContext, - partitionKey + partitionKey, + this.clientContext ); return new ItemResponse( response.result, @@ -359,8 +360,8 @@ export class Items { const ref = new Item( this.container, (response.result as any).id, - this.clientContext, - partitionKey + partitionKey, + this.clientContext ); return new ItemResponse( response.result, @@ -407,8 +408,7 @@ export class Items { const { resources: partitionKeyRanges } = await this.container .readPartitionKeyRanges() .fetchAll(); - const { resource } = await this.container.readPartitionKeyDefinition(); - const partitionDefinition = assertNotUndefined(resource, "PartitionKeyDefinition."); + const { resource: definition } = await this.container.getPartitionKeyDefinition(); const batches: Batch[] = partitionKeyRanges.map((keyRange: PartitionKeyRange) => { return { min: keyRange.minInclusive, @@ -418,8 +418,19 @@ export class Items { operations: [], }; }); - - this.groupOperationsBasedOnPartitionKey(operations, partitionDefinition, options, batches); + operations + .map((operation) => decorateOperation(operation, definition, options)) + .forEach((operation: Operation, index: number) => { + const partitionProp = definition.paths[0].replace("/", ""); + const isV2 = definition.version && definition.version === 2; + const toHashKey = getPartitionKeyToHash(operation, partitionProp); + const hashed = isV2 ? hashV2PartitionKey(toHashKey) : hashV1PartitionKey(toHashKey); + const batchForKey = batches.find((batch: Batch) => { + return isKeyInRange(batch.min, batch.max, hashed); + }); + batchForKey.operations.push(operation); + batchForKey.indexes.push(index); + }); const path = getPathFromLink(this.container.url, ResourceType.item); @@ -449,8 +460,7 @@ export class Items { // partition key types as well since we don't support them, so for now we throw if (err.code === 410) { throw new Error( - "Partition key error. Either the partitions have split or an operation has an unsupported partitionKey type" + - err.message + "Partition key error. Either the partitions have split or an operation has an unsupported partitionKey type" ); } throw new Error(`Bulk request errored with: ${err.message}`); @@ -460,43 +470,6 @@ export class Items { return orderedResponses; } - /** - * Function to create batches based of partition key Ranges. - * @param operations - operations to group - * @param partitionDefinition - PartitionKey definition of container. - * @param options - Request options for bulk request. 
- * @param batches - Groups to be filled with operations. - */ - private groupOperationsBasedOnPartitionKey( - operations: OperationInput[], - partitionDefinition: PartitionKeyDefinition, - options: RequestOptions | undefined, - batches: Batch[] - ) { - operations.forEach((operationInput, index: number) => { - const { operation, partitionKey } = prepareOperations( - operationInput, - partitionDefinition, - options - ); - const hashed = hashPartitionKey( - assertNotUndefined( - partitionKey, - "undefined value for PartitionKey not expected during grouping of bulk operations." - ), - partitionDefinition - ); - const batchForKey = assertNotUndefined( - batches.find((batch: Batch) => { - return isKeyInRange(batch.min, batch.max, hashed); - }), - "No suitable Batch found." - ); - batchForKey.operations.push(operation); - batchForKey.indexes.push(index); - }); - } - /** * Execute transactional batch operations on items. * @@ -526,7 +499,7 @@ export class Items { */ public async batch( operations: OperationInput[], - partitionKey?: PartitionKey, + partitionKey: string = "[{}]", options?: RequestOptions ): Promise> { operations.map((operation) => decorateBatchOperation(operation, options)); diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts index 43bb0eafe83a..be19cd7d496d 100644 --- a/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts +++ b/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts @@ -1,50 +1,5 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. +import { PartitionKeyDefinition } from "./PartitionKeyDefinition"; -import { NonePartitionKeyLiteral, NullPartitionKeyLiteral } from "./PartitionKeyInternal"; - -export type PartitionKey = PrimitivePartitionKeyValue | PrimitivePartitionKeyValue[]; - -/** - * A primitive Partition Key value. - */ -export type PrimitivePartitionKeyValue = - | string - | number - | boolean - | NullPartitionKeyType - | NonePartitionKeyType; - -/** - * The returned object represents a partition key value that allows creating and accessing items - * with a null value for the partition key. - */ -export type NullPartitionKeyType = null; - -/** - * The returned object represents a partition key value that allows creating and accessing items - * without a value for partition key - */ -export type NonePartitionKeyType = { - [K in any]: never; -}; - -/** - * Builder class for building PartitionKey. - */ -export class PartitionKeyBuilder { - readonly values: PrimitivePartitionKeyValue[] = []; - public addValue(value: string | boolean | number): PartitionKeyBuilder { - this.values.push(value); - return this; - } - public addNullValue(): void { - this.values.push(NullPartitionKeyLiteral); - } - public addNoneValue(): void { - this.values.push(NonePartitionKeyLiteral); - } - public build(): PartitionKey { - return [...this.values]; - } -} +export type PartitionKey = PartitionKeyDefinition | string | number | unknown; diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts index b4186e04bc9c..983bb837236c 100644 --- a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts +++ b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts @@ -1,8 +1,5 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
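Reviewer note: a standalone sketch of the bucketing step that `Items.bulk()` now performs inline in the hunk above: hash each operation's effective partition key, then append the operation to the batch whose `[min, max)` effective-key range contains that hash. The two sample ranges and hash values below are made up for illustration; real ones come from `readPartitionKeyRanges()` and the v1/v2 hash functions.

```ts
interface SketchBatch {
  min: string;
  max: string;
  rangeId: string;
  indexes: number[];
  operations: unknown[];
}

// Effective partition keys are uppercase hex strings, so lexicographic
// comparison gives the range check (min inclusive, max exclusive).
function isKeyInRange(min: string, max: string, key: string): boolean {
  return key >= min && key < max;
}

const batches: SketchBatch[] = [
  { min: "", max: "7F", rangeId: "0", indexes: [], operations: [] },
  { min: "7F", max: "FF", rangeId: "1", indexes: [], operations: [] },
];

const hashedKeys = ["05C1EFE3", "8A3B0000"]; // pretend hashV1/hashV2 output
hashedKeys.forEach((hashed, index) => {
  const batchForKey = batches.find((b) => isKeyInRange(b.min, b.max, hashed));
  batchForKey?.operations.push({ index });
  batchForKey?.indexes.push(index);
});
```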
-import { PartitionKeyDefinitionVersion } from "./PartitionKeyDefinitionVersion"; -import { PartitionKeyKind } from "./PartitionKeyKind"; - export interface PartitionKeyDefinition { /** * An array of paths for which data within the collection can be partitioned. Paths must not contain a wildcard or @@ -14,10 +11,6 @@ export interface PartitionKeyDefinition { * An optional field, if not specified the default value is 1. To use the large partition key set the version to 2. * To learn about large partition keys, see [how to create containers with large partition key](https://docs.microsoft.com/en-us/azure/cosmos-db/large-partition-keys) article. */ - version?: PartitionKeyDefinitionVersion; + version?: number; systemKey?: boolean; - /** - * What kind of partition key is being defined (default: "Hash") - */ - kind?: PartitionKeyKind; } diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinitionVersion.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinitionVersion.ts deleted file mode 100644 index 9021aff9dd21..000000000000 --- a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinitionVersion.ts +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * PartitionKey Definition Version - */ -export enum PartitionKeyDefinitionVersion { - V1 = 1, - V2 = 2, -} diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyInternal.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyInternal.ts deleted file mode 100644 index 9c4267309fea..000000000000 --- a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyInternal.ts +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { - NonePartitionKeyType, - NullPartitionKeyType, - PartitionKey, - PrimitivePartitionKeyValue, -} from "./PartitionKey"; - -/** - * @hidden - * Internal Representation Of Partition Key. TODO: Make sure {@link ClientContext} working with only {@link PartitionKeyInternal} - */ -export type PartitionKeyInternal = PrimitivePartitionKeyValue[]; -/** - * @hidden - * None PartitionKey Literal - */ -export const NonePartitionKeyLiteral: NonePartitionKeyType = {}; -/** - * @hidden - * Null PartitionKey Literal - */ -export const NullPartitionKeyLiteral: NullPartitionKeyType = null; -/** - * @hidden - * Maps PartitionKey to InternalPartitionKey. - * @param partitionKey - PartitonKey to be converted. - * @returns PartitionKeyInternal - */ -export function convertToInternalPartitionKey(partitionKey: PartitionKey): PartitionKeyInternal { - if (Array.isArray(partitionKey)) return partitionKey; - else return [partitionKey]; -} diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyKind.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyKind.ts deleted file mode 100644 index b8303e237dc3..000000000000 --- a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyKind.ts +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Type of PartitionKey i.e. 
Hash, MultiHash - */ -export enum PartitionKeyKind { - Hash = "Hash", - MultiHash = "MultiHash", -} diff --git a/sdk/cosmosdb/cosmos/src/documents/index.ts b/sdk/cosmosdb/cosmos/src/documents/index.ts index 46df0c1110b4..0c6afe336af8 100644 --- a/sdk/cosmosdb/cosmos/src/documents/index.ts +++ b/sdk/cosmosdb/cosmos/src/documents/index.ts @@ -10,9 +10,6 @@ export * from "./IndexingMode"; export * from "./IndexingPolicy"; export * from "./IndexKind"; export * from "./PartitionKey"; -export * from "./PartitionKeyInternal"; -export * from "./PartitionKeyDefinitionVersion"; -export * from "./PartitionKeyKind"; export * from "./PartitionKeyDefinition"; export * from "./PermissionMode"; export * from "./TriggerOperation"; diff --git a/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts b/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts index 66cdec2d8a01..e4ef2c425742 100644 --- a/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts +++ b/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts @@ -1,78 +1,47 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -import { AzureLogger, createClientLogger } from "@azure/logger"; import { parsePath } from "./common"; -import { - NonePartitionKeyLiteral, - NullPartitionKeyLiteral, - PartitionKeyDefinition, - PartitionKeyInternal, - PrimitivePartitionKeyValue, -} from "./documents"; - -const logger: AzureLogger = createClientLogger("extractPartitionKey"); +import { PartitionKey, PartitionKeyDefinition } from "./documents"; /** - * Function to extract PartitionKey based on {@link PartitionKeyDefinition} - * from an object. - * Retuns - * 1. PartitionKeyInternal[] if extraction is successful. - * 2. undefined if either {@link partitionKeyDefinition} is not well formed - * or an unsupported partitionkey type is encountered. 
* @hidden */ export function extractPartitionKey( document: unknown, - partitionKeyDefinition?: PartitionKeyDefinition -): PartitionKeyInternal | undefined { + partitionKeyDefinition: PartitionKeyDefinition +): PartitionKey[] { if ( partitionKeyDefinition && partitionKeyDefinition.paths && partitionKeyDefinition.paths.length > 0 ) { - const partitionKeys: PrimitivePartitionKeyValue[] = []; + const partitionKey: PartitionKey[] = []; partitionKeyDefinition.paths.forEach((path: string) => { - const pathParts: string[] = parsePath(path); + const pathParts = parsePath(path); let obj = document; for (const part of pathParts) { - if (typeof obj === "object" && obj !== null && part in obj) { + if (typeof obj === "object" && part in obj) { obj = (obj as Record)[part]; } else { obj = undefined; break; } } - if (typeof obj === "string" || typeof obj === "number" || typeof obj === "boolean") { - partitionKeys.push(obj); - } else if (obj === NullPartitionKeyLiteral) { - partitionKeys.push(NullPartitionKeyLiteral); - } else if ( - obj === undefined || - JSON.stringify(obj) === JSON.stringify(NonePartitionKeyLiteral) - ) { - if (partitionKeyDefinition.systemKey === true) { - return []; - } - partitionKeys.push(NonePartitionKeyLiteral); - } else { - logger.warning("Unsupported PartitionKey found."); - return undefined; - } + partitionKey.push(obj); }); - return partitionKeys; + if (partitionKey.length === 1 && partitionKey[0] === undefined) { + return undefinedPartitionKey(partitionKeyDefinition); + } + return partitionKey; } - logger.warning("Unexpected Partition Key Definition Found."); - return undefined; } /** * @hidden */ -export function undefinedPartitionKey( - partitionKeyDefinition: PartitionKeyDefinition -): PartitionKeyInternal { +export function undefinedPartitionKey(partitionKeyDefinition: PartitionKeyDefinition): unknown[] { if (partitionKeyDefinition.systemKey === true) { return []; } else { - return partitionKeyDefinition.paths.map(() => NonePartitionKeyLiteral); + return [{}]; } } diff --git a/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts b/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts index 2e7e32fa9f2c..5650bd4ebb5e 100644 --- a/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts +++ b/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts @@ -1,6 +1,5 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -import { PartitionKey } from "../documents"; import { SharedOptions } from "./SharedOptions"; /** @@ -96,5 +95,5 @@ export interface FeedOptions extends SharedOptions { * The former is useful when the query body is out of your control * but you still want to restrict it to a single partition. Example: an end user specified query. 
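Reviewer note: a hedged usage sketch for the `extractPartitionKey` rewrite above; the document shapes are illustrative, while the return values follow the new code path (a hit returns the extracted values, and a fully missed single path falls back to `undefinedPartitionKey`'s `[{}]` sentinel).

```ts
import { extractPartitionKey } from "@azure/cosmos";

const definition = { paths: ["/nested/key"] };

// Path resolves: the value at /nested/key is returned in an array.
extractPartitionKey({ nested: { key: "value" } }, definition); // ["value"]

// Path misses entirely: the single undefined result is replaced by [{}].
extractPartitionKey({ unrelated: 1 }, definition); // [{}]
```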
*/ - partitionKey?: PartitionKey; + partitionKey?: any; } diff --git a/sdk/cosmosdb/cosmos/src/request/request.ts b/sdk/cosmosdb/cosmos/src/request/request.ts index b680398f8821..65a1e42daf74 100644 --- a/sdk/cosmosdb/cosmos/src/request/request.ts +++ b/sdk/cosmosdb/cosmos/src/request/request.ts @@ -3,7 +3,7 @@ import { setAuthorizationHeader } from "../auth"; import { Constants, HTTPMethod, jsonStringifyAndEscapeNonASCII, ResourceType } from "../common"; import { CosmosClientOptions } from "../CosmosClientOptions"; -import { PartitionKeyInternal } from "../documents"; +import { PartitionKey } from "../documents"; import { CosmosHeaders } from "../queryExecutionContext"; import { FeedOptions, RequestOptions } from "./index"; import { defaultLogger } from "../common/logger"; @@ -41,7 +41,7 @@ interface GetHeadersOptions { options: RequestOptions & FeedOptions; partitionKeyRangeId?: string; useMultipleWriteLocations?: boolean; - partitionKey?: PartitionKeyInternal; + partitionKey?: PartitionKey; } const JsonContentType = "application/json"; @@ -168,6 +168,9 @@ export async function getHeaders({ } if (partitionKey !== undefined && !headers[Constants.HttpHeaders.PartitionKey]) { + if (partitionKey === null || !Array.isArray(partitionKey)) { + partitionKey = [partitionKey as string]; + } headers[Constants.HttpHeaders.PartitionKey] = jsonStringifyAndEscapeNonASCII(partitionKey); } diff --git a/sdk/cosmosdb/cosmos/src/utils/batch.ts b/sdk/cosmosdb/cosmos/src/utils/batch.ts index d90a7dc01ae7..537d2f2f3bad 100644 --- a/sdk/cosmosdb/cosmos/src/utils/batch.ts +++ b/sdk/cosmosdb/cosmos/src/utils/batch.ts @@ -3,17 +3,10 @@ import { JSONObject } from "../queryExecutionContext"; import { extractPartitionKey } from "../extractPartitionKey"; -import { - NonePartitionKeyLiteral, - PartitionKey, - PartitionKeyDefinition, - PrimitivePartitionKeyValue, - convertToInternalPartitionKey, -} from "../documents"; +import { PartitionKeyDefinition } from "../documents"; import { RequestOptions } from ".."; import { PatchRequestBody } from "./patch"; import { v4 } from "uuid"; -import { assertNotUndefined } from "./typeChecks"; const uuid = v4; export type Operation = @@ -77,7 +70,7 @@ export type OperationInput = | PatchOperationInput; export interface CreateOperationInput { - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Create; @@ -85,7 +78,7 @@ export interface CreateOperationInput { } export interface UpsertOperationInput { - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Upsert; @@ -93,19 +86,19 @@ export interface UpsertOperationInput { } export interface ReadOperationInput { - partitionKey?: PartitionKey; + partitionKey?: string | number | boolean | null | Record | undefined; operationType: typeof BulkOperationType.Read; id: string; } export interface DeleteOperationInput { - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; operationType: typeof BulkOperationType.Delete; id: string; } export interface ReplaceOperationInput { - partitionKey?: PartitionKey; + partitionKey?: string | number | null | Record | undefined; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Replace; @@ -114,7 +107,7 @@ export interface ReplaceOperationInput { } export interface PatchOperationInput { - partitionKey?: 
PartitionKey; + partitionKey?: string | number | null | Record | undefined; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Patch; @@ -162,77 +155,59 @@ export function hasResource( (operation as OperationWithItem).resourceBody !== undefined ); } -/** - * Maps OperationInput to Operation by - * - generating Ids if needed. - * - choosing partitionKey which can be used to choose which batch this - * operation should be part of. The order is - - * 1. If the operationInput itself has partitionKey field set it is used. - * 2. Other wise for create/replace/upsert it is extracted from resource body. - * 3. For read/delete/patch type operations undefined partitionKey is used. - * - Here one nuance is that, the partitionKey field inside Operation needs to - * be serialized as a JSON string. - * @param operationInput - OperationInput - * @param definition - PartitionKeyDefinition - * @param options - RequestOptions - * @returns - */ -export function prepareOperations( - operationInput: OperationInput, - definition: PartitionKeyDefinition, - options: RequestOptions = {} -): { - operation: Operation; - partitionKey: PrimitivePartitionKeyValue[]; -} { - populateIdsIfNeeded(operationInput, options); - - let partitionKey: PrimitivePartitionKeyValue[]; - if (Object.prototype.hasOwnProperty.call(operationInput, "partitionKey")) { - if (operationInput.partitionKey === undefined) { - partitionKey = definition.paths.map(() => NonePartitionKeyLiteral); - } else { - partitionKey = convertToInternalPartitionKey(operationInput.partitionKey); - } - } else { - switch (operationInput.operationType) { - case BulkOperationType.Create: - case BulkOperationType.Replace: - case BulkOperationType.Upsert: - partitionKey = assertNotUndefined( - extractPartitionKey(operationInput.resourceBody, definition), - "" - ); - break; - case BulkOperationType.Read: - case BulkOperationType.Delete: - case BulkOperationType.Patch: - partitionKey = definition.paths.map(() => NonePartitionKeyLiteral); - } + +export function getPartitionKeyToHash(operation: Operation, partitionProperty: string): any { + const toHashKey = hasResource(operation) + ? deepFind(operation.resourceBody, partitionProperty) + : (operation.partitionKey && operation.partitionKey.replace(/[[\]"']/g, "")) || + operation.partitionKey; + // We check for empty object since replace will stringify the value + // The second check avoids cases where the partitionKey value is actually the string '{}' + if (toHashKey === "{}" && operation.partitionKey === "[{}]") { + return {}; + } + if (toHashKey === "null" && operation.partitionKey === "[null]") { + return null; } - return { - operation: { ...operationInput, partitionKey: JSON.stringify(partitionKey) } as Operation, - partitionKey, - }; + if (toHashKey === "0" && operation.partitionKey === "[0]") { + return 0; + } + return toHashKey; } -/** - * For operations requiring Id genrate random uuids. - * @param operationInput - OperationInput to be checked. 
- * @param options - RequestOptions - */ -function populateIdsIfNeeded(operationInput: OperationInput, options: RequestOptions) { +export function decorateOperation( + operation: OperationInput, + definition: PartitionKeyDefinition, + options: RequestOptions = {} +): Operation { if ( - operationInput.operationType === BulkOperationType.Create || - operationInput.operationType === BulkOperationType.Upsert + operation.operationType === BulkOperationType.Create || + operation.operationType === BulkOperationType.Upsert ) { if ( - (operationInput.resourceBody.id === undefined || operationInput.resourceBody.id === "") && + (operation.resourceBody.id === undefined || operation.resourceBody.id === "") && !options.disableAutomaticIdGeneration ) { - operationInput.resourceBody.id = uuid(); + operation.resourceBody.id = uuid(); } } + if ("partitionKey" in operation) { + const extracted = extractPartitionKey(operation, { paths: ["/partitionKey"] }); + return { ...operation, partitionKey: JSON.stringify(extracted) } as Operation; + } else if ( + operation.operationType === BulkOperationType.Create || + operation.operationType === BulkOperationType.Replace || + operation.operationType === BulkOperationType.Upsert + ) { + const pk = extractPartitionKey(operation.resourceBody, definition); + return { ...operation, partitionKey: JSON.stringify(pk) } as Operation; + } else if ( + operation.operationType === BulkOperationType.Read || + operation.operationType === BulkOperationType.Delete + ) { + return { ...operation, partitionKey: "[{}]" }; + } + return operation as Operation; } export function decorateBatchOperation( @@ -252,3 +227,19 @@ export function decorateBatchOperation( } return operation as Operation; } +/** + * Util function for finding partition key values nested in objects at slash (/) separated paths + * @hidden + */ +export function deepFind(document: T, path: P): string | JSONObject { + const apath = path.split("/"); + let h: any = document; + for (const p of apath) { + if (p in h) h = h[p]; + else { + console.warn(`Partition key not found, using undefined: ${path} at ${p}`); + return "{}"; + } + } + return h; +} diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/hash.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/hash.ts deleted file mode 100644 index d6375ae14f50..000000000000 --- a/sdk/cosmosdb/cosmos/src/utils/hashing/hash.ts +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { - PartitionKeyDefinition, - PartitionKeyDefinitionVersion, - PartitionKeyKind, - PrimitivePartitionKeyValue, -} from "../../documents"; -import { hashMultiHashPartitionKey } from "./multiHash"; -import { hashV1PartitionKey } from "./v1"; -import { hashV2PartitionKey } from "./v2"; - -/** - * Generate hash of a PartitonKey based on it PartitionKeyDefinition. - * @param partitionKey - to be hashed. - * @param partitionDefinition - container's partitionKey definition - * @returns - */ -export function hashPartitionKey( - partitionKey: PrimitivePartitionKeyValue[], - partitionDefinition: PartitionKeyDefinition -): string { - const kind: PartitionKeyKind = partitionDefinition?.kind || PartitionKeyKind.Hash; // Default value. - const isV2 = - partitionDefinition && - partitionDefinition.version && - partitionDefinition.version === PartitionKeyDefinitionVersion.V2; - switch (kind) { - case PartitionKeyKind.Hash: - return isV2 ? 
hashV2PartitionKey(partitionKey) : hashV1PartitionKey(partitionKey); - case PartitionKeyKind.MultiHash: - return hashMultiHashPartitionKey(partitionKey); - } -} diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/multiHash.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/multiHash.ts deleted file mode 100644 index 5c03f80616ff..000000000000 --- a/sdk/cosmosdb/cosmos/src/utils/hashing/multiHash.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -import { PrimitivePartitionKeyValue } from "../../documents"; -import { hashV2PartitionKey } from "./v2"; - -/** - * Generate Hash for a `Multi Hash` type partition. - * @param partitionKey - to be hashed. - * @returns - */ -export function hashMultiHashPartitionKey(partitionKey: PrimitivePartitionKeyValue[]): string { - return partitionKey.map((keys) => hashV2PartitionKey([keys])).join(""); -} diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts index 0d433b1d4692..66794b8a9924 100644 --- a/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts +++ b/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts @@ -5,21 +5,20 @@ import { doubleToByteArrayJSBI, writeNumberForBinaryEncodingJSBI } from "./encod import { writeStringForBinaryEncoding } from "./encoding/string"; import { BytePrefix } from "./encoding/prefix"; import MurmurHash from "./murmurHash"; -import { PrimitivePartitionKeyValue } from "../../documents"; const MAX_STRING_CHARS = 100; -export function hashV1PartitionKey(partitionKey: PrimitivePartitionKeyValue[]): string { - const key = partitionKey[0]; - const toHash = prefixKeyByType(key); +type v1Key = string | number | boolean | null | Record | undefined; + +export function hashV1PartitionKey(partitionKey: v1Key): string { + const toHash = prefixKeyByType(partitionKey); const hash = MurmurHash.x86.hash32(toHash); const encodedJSBI = writeNumberForBinaryEncodingJSBI(hash); - const encodedValue = encodeByType(key); - const finalHash = Buffer.concat([encodedJSBI, encodedValue]).toString("hex").toUpperCase(); - return finalHash; + const encodedValue = encodeByType(partitionKey); + return Buffer.concat([encodedJSBI, encodedValue]).toString("hex").toUpperCase(); } -function prefixKeyByType(key: PrimitivePartitionKeyValue): Buffer { +function prefixKeyByType(key: v1Key): Buffer { let bytes: Buffer; switch (typeof key) { case "string": { @@ -54,7 +53,7 @@ function prefixKeyByType(key: PrimitivePartitionKeyValue): Buffer { } } -function encodeByType(key: PrimitivePartitionKeyValue): Buffer { +function encodeByType(key: v1Key): Buffer { switch (typeof key) { case "string": { const truncated = key.substr(0, MAX_STRING_CHARS); diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts index d873505df467..d2138195ad7f 100644 --- a/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts +++ b/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts @@ -1,20 +1,21 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
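Reviewer note: with this change the v1/v2 hash functions take a scalar key directly instead of a single-element array. A small sketch of the new call shape; the relative import paths assume a consumer sitting beside `src/utils/hashing`, and the expected outputs are taken from the updated spec fixtures further down in this patch.

```ts
import { hashV1PartitionKey } from "./v1";
import { hashV2PartitionKey } from "./v2";

// Scalar keys hash directly -- no array wrapper around the value.
console.log(hashV1PartitionKey("redmond")); // 05C1EFE313830C087366656E706F6500
console.log(hashV2PartitionKey("redmond")); // 22E342F38A486A088463DFF7838A5963
```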
-import { PrimitivePartitionKeyValue } from "../../documents"; import { doubleToByteArrayJSBI } from "./encoding/number"; import { BytePrefix } from "./encoding/prefix"; import MurmurHash from "./murmurHash"; -export function hashV2PartitionKey(partitionKey: PrimitivePartitionKeyValue[]): string { - const toHash: Buffer = Buffer.concat(partitionKey.map(prefixKeyByType)); +type v2Key = string | number | boolean | null | Record | undefined; + +export function hashV2PartitionKey(partitionKey: v2Key): string { + const toHash = prefixKeyByType(partitionKey); const hash = MurmurHash.x64.hash128(toHash); const reverseBuff: Buffer = reverse(Buffer.from(hash, "hex")); reverseBuff[0] &= 0x3f; return reverseBuff.toString("hex").toUpperCase(); } -function prefixKeyByType(key: PrimitivePartitionKeyValue): Buffer { +function prefixKeyByType(key: v2Key): Buffer { let bytes: Buffer; switch (typeof key) { case "string": { diff --git a/sdk/cosmosdb/cosmos/src/utils/typeChecks.ts b/sdk/cosmosdb/cosmos/src/utils/typeChecks.ts deleted file mode 100644 index e10fe83a47e0..000000000000 --- a/sdk/cosmosdb/cosmos/src/utils/typeChecks.ts +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -/** - * A type which could be any type but undefined - */ -export type NonUndefinable = T extends undefined ? never : T; - -/** - * Utility function to avoid writing boilder plate code while checking for - * undefined values. It throws Error if the input value is undefined. - * @param value - Value which is potentially undefined. - * @param msg - Error Message to throw if value is undefined. - * @returns - */ -export function assertNotUndefined(value: T, msg?: string): NonUndefinable { - if (value !== undefined) { - return value as NonUndefinable; - } - throw new Error(msg || "Unexpected 'undefined' value encountered"); -} diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts index dd2a1fdc0377..5a72780dfe53 100644 --- a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts +++ b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts @@ -8,58 +8,56 @@ describe("effectivePartitionKey", function () { describe("computes v1 key", function () { const toMatch = [ { - key: ["partitionKey"], + key: "partitionKey", output: "05C1E1B3D9CD2608716273756A756A706F4C667A00", }, { - key: ["redmond"], + key: "redmond", output: "05C1EFE313830C087366656E706F6500", }, { - key: [ - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - ], + key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", output: "05C1EB5921F706086262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626200", }, { - key: [""], + key: "", output: "05C1CF33970FF80800", }, { - key: ["aa"], + key: "aa", output: "05C1C7B7270FE008626200", }, { - key: [null], + key: null, output: "05C1ED45D7475601", }, { - key: [true], + key: true, output: "05C1D7C5A903D803", }, { - key: [false], + key: false, output: "05C1DB857D857C02", }, { - key: [{}], + key: {}, output: "05C1D529E345DC00", }, { - key: [5], + key: 5, output: "05C1D9C1C5517C05C014", }, { - key: [5.5], + key: 5.5, output: "05C1D7A771716C05C016", }, { - key: [12313.1221], + key: 12313.1221, output: "05C1ED154D592E05C0C90723F50FC925D8", 
}, { - key: [123456789], + key: 123456789, output: "05C1D9E1A5311C05C19DB7CD8B40", }, ]; diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts index 62e8f933799c..b1634cfc3506 100644 --- a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts +++ b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts @@ -8,53 +8,51 @@ describe("effectivePartitionKey", function () { describe("computes v2 key", function () { const toMatch = [ { - key: ["redmond"], + key: "redmond", output: "22E342F38A486A088463DFF7838A5963", }, { - key: [ - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - ], + key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", output: "0BA3E9CA8EE4C14538828D1612A4B652", }, { - key: [""], + key: "", output: "32E9366E637A71B4E710384B2F4970A0", }, { - key: ["aa"], + key: "aa", output: "05033626483AE80D00E44FBD35362B19", }, { - key: [null], + key: null, output: "378867E4430E67857ACE5C908374FE16", }, { - key: [true], + key: true, output: "0E711127C5B5A8E4726AC6DD306A3E59", }, { - key: [false], + key: false, output: "2FE1BE91E90A3439635E0E9E37361EF2", }, { - key: [{}], + key: {}, output: "11622DAA78F835834610ABE56EFF5CB5", }, { - key: [5], + key: 5, output: "19C08621B135968252FB34B4CF66F811", }, { - key: [5.5], + key: 5.5, output: "0E2EE47829D1AF775EEFB6540FD1D0ED", }, { - key: [12313.1221], + key: 12313.1221, output: "27E7ECA8F2EE3E53424DE8D5220631C6", }, { - key: [123456789], + key: 123456789, output: "1F56D2538088EBA82CCF988F36E16760", }, ]; diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/utils/batch.spec.ts b/sdk/cosmosdb/cosmos/test/internal/unit/utils/batch.spec.ts new file mode 100644 index 000000000000..f9af90f73958 --- /dev/null +++ b/sdk/cosmosdb/cosmos/test/internal/unit/utils/batch.spec.ts @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from "assert"; +import { deepFind } from "../../../../src/utils/batch"; + +describe("batch utils", function () { + it("deep finds nested partition key values in objects", function () { + const testTwiceNested = { + nested: { + nested2: { + key: "value", + }, + }, + }; + const testNested = { + nested: { + key: "value", + }, + }; + const testBase = { + key: "value", + }; + assert.equal(deepFind(testNested, "nested/key"), "value"); + assert.equal(deepFind(testBase, "key"), "value"); + assert.equal(deepFind(testTwiceNested, "nested/nested2/key"), "value"); + }); +}); diff --git a/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts b/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts index 4b11c6ecc94a..388992e35b66 100644 --- a/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts +++ b/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts @@ -6,9 +6,6 @@ import { CosmosClient, Database, DatabaseDefinition, - extractPartitionKey, - PartitionKey, - PartitionKeyDefinition, PermissionDefinition, RequestOptions, Response, @@ -104,11 +101,13 @@ export async function bulkInsertItems( export async function bulkReadItems( container: Container, documents: any[], - partitionKeyDef: PartitionKeyDefinition + partitionKeyProperty: string ): Promise { return Promise.all( documents.map(async (document) => { - const partitionKey = extractPartitionKey(document, partitionKeyDef); + const partitionKey = Object.prototype.hasOwnProperty.call(document, partitionKeyProperty) + ? 
document[partitionKeyProperty] + : undefined; // TODO: should we block or do all requests in parallel? const { resource: doc } = await container.item(document.id, partitionKey).read(); @@ -120,11 +119,13 @@ export async function bulkReadItems( export async function bulkReplaceItems( container: Container, documents: any[], - partitionKeyDef: PartitionKeyDefinition + partitionKeyProperty: string ): Promise { return Promise.all( documents.map(async (document) => { - const partitionKey = extractPartitionKey(document, partitionKeyDef); + const partitionKey = Object.prototype.hasOwnProperty.call(document, partitionKeyProperty) + ? document[partitionKeyProperty] + : undefined; const { resource: doc } = await container.item(document.id, partitionKey).replace(document); const { _etag: _1, _ts: _2, ...expectedModifiedDocument } = document; // eslint-disable-line @typescript-eslint/no-unused-vars const { _etag: _4, _ts: _3, ...actualModifiedDocument } = doc; // eslint-disable-line @typescript-eslint/no-unused-vars @@ -137,11 +138,13 @@ export async function bulkReplaceItems( export async function bulkDeleteItems( container: Container, documents: any[], - partitionKeyDef: PartitionKeyDefinition + partitionKeyProperty: string ): Promise { await Promise.all( documents.map(async (document) => { - const partitionKey = extractPartitionKey(document, partitionKeyDef); + const partitionKey = Object.prototype.hasOwnProperty.call(document, partitionKeyProperty) + ? document[partitionKeyProperty] + : undefined; await container.item(document.id, partitionKey).delete(); }) @@ -151,31 +154,25 @@ export async function bulkDeleteItems( export async function bulkQueryItemsWithPartitionKey( container: Container, documents: any[], - query: string, - parameterGenerator: (doc: any) => { name: string; value: any }[] + partitionKeyPropertyName: string ): Promise { for (const document of documents) { - const parameters = parameterGenerator(document); - const shouldSkip = parameters.reduce( - (previous, current) => previous || current["value"] === undefined, - false - ); - if (shouldSkip) { + if (!Object.prototype.hasOwnProperty.call(document, partitionKeyPropertyName)) { continue; } + const querySpec = { - query: query, - parameters: parameters, + query: "SELECT * FROM root r WHERE r." 
+ partitionKeyPropertyName + "=@key", + parameters: [ + { + name: "@key", + value: document[partitionKeyPropertyName], + }, + ], }; const { resources } = await container.items.query(querySpec).fetchAll(); - assert.equal( - resources.length, - 1, - `Expected exactly 1 document, doc: ${JSON.stringify( - document - )}, query: '${query}', parameters: ${JSON.stringify(parameters)}` - ); + assert.equal(resources.length, 1, "Expected exactly 1 document"); assert.equal(JSON.stringify(resources[0]), JSON.stringify(document)); } } @@ -198,14 +195,13 @@ export async function replaceOrUpsertItem( container: Container, body: unknown, options: RequestOptions, - isUpsertTest: boolean, - partitionKey?: PartitionKey + isUpsertTest: boolean ): Promise> { if (isUpsertTest) { return container.items.upsert(body, options); } else { const bodyWithId = body as { id: string }; - return container.item(bodyWithId.id, partitionKey).replace(body, options); + return container.item(bodyWithId.id, undefined).replace(body, options); } } diff --git a/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts b/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts index 95c0d58ddcb6..c9c0a4884ea5 100644 --- a/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts +++ b/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts @@ -3,15 +3,11 @@ import assert from "assert"; import { Suite } from "mocha"; import { - BulkOptions, Container, - ContainerDefinition, - ContainerRequest, CosmosClient, OperationResponse, PatchOperation, PatchOperationType, - RequestOptions, } from "../../../src"; import { ItemDefinition } from "../../../src"; import { @@ -30,205 +26,58 @@ import { import { BulkOperationType, OperationInput } from "../../../src"; import { endpoint } from "../common/_testConfig"; import { masterKey } from "../common/_fakeTestSecrets"; -import { - PartitionKey, - PartitionKeyDefinition, - PartitionKeyDefinitionVersion, - PartitionKeyKind, -} from "../../../src/documents"; - -/** - * Tests Item api. - * Nomenclature - * V1 Container - containerDefinition.partitionKey.version is undefined or 1 - * V2 Container - containerDefinition.partitionKey.version is 2 - * Single Partition Container - Container with only one physical partition. - * Multi Partition Container - Container with more than one physical partition. - * Hierarchical Partition Container - Container with more than one level of hierarchy of Partition Keys i.e. ['key1', 'key2'] - * Nested Partition Key - Partition Key composed of value which is nested in the document. i.e ['/Address/Zip'] - */ interface TestItem { id?: string; name?: string; foo?: string; - key?: string | number | boolean; - key2?: string | number | boolean; + key?: string; replace?: string; - nested1?: { - nested2: { - nested3: string | number | boolean; - }; - }; - prop?: number; -} - -type CRUDTestDataSet = { - // Container to create. 
- containerDef: ContainerDefinition; - // item to create - itemDef: TestItem; - // item to replace it with - replaceItemDef: TestItem; - // Partition key to use for operations on original item - originalItemPartitionKey?: PartitionKey; - // Partition key to use for operations on replaced item - replacedItemPartitionKey?: PartitionKey; - propertyToCheck?: string[]; -}; - -function extractNestedPropertyFromObject(obj: any, paths: string[] = []) { - paths.reduce((ob: any, path: string) => { - if (ob !== null && ob !== undefined && typeof ob === "object") { - return ob[path]; - } else { - throw new Error(`The property ${path} doesn't exisit in object`); - } - }, obj); -} - -type MultiCRUDTestDataSet = { - dbName: string; - containerDef: ContainerDefinition; - partitinKeyDef: PartitionKeyDefinition; - containerRequestOps: RequestOptions; - documents: TestItem[]; - singleDocFetchQuery: string; - parameterGenerator: (doc: any) => { name: string; value: any }[]; -}; - -/** - * Helper function to run Create Upsert Read Update Replace Delete operation on Items. - * @param dataset - CRUDTestDataSet - * @param isUpsertTest - is upsert is to be tested instead of create. - */ -async function CRUDTestRunner(dataset: CRUDTestDataSet, isUpsertTest: boolean): Promise { - // create database - const database = await getTestDatabase("sample 中文 database"); - // create container - const { resource: containerdef } = await database.containers.create(dataset.containerDef); - const container: Container = database.container(containerdef.id); - - // read items on empty container - const { resources: items } = await container.items.readAll().fetchAll(); - assert(Array.isArray(items), "Value should be an array"); - - // create an item - const beforeCreateDocumentsCount = items.length; - - const itemDefinition = dataset.itemDef; - try { - await createOrUpsertItem( - container, - itemDefinition, - { disableAutomaticIdGeneration: true }, - isUpsertTest - ); - assert.fail("id generation disabled must throw with invalid id"); - } catch (err: any) { - assert(err !== undefined, "should throw an error because automatic id generation is disabled"); - } - - // create or upsert - const { resource: document } = await createOrUpsertItem( - container, - itemDefinition, - undefined, - isUpsertTest - ); - assert.equal( - extractNestedPropertyFromObject(document, dataset.propertyToCheck), - extractNestedPropertyFromObject(itemDefinition, dataset.propertyToCheck) - ); - assert(document.id !== undefined); - // read documents after creation - const { resources: documents2 } = await container.items.readAll().fetchAll(); - assert.equal( - documents2.length, - beforeCreateDocumentsCount + 1, - "create should increase the number of documents" - ); - // query documents - const querySpec = { - query: "SELECT * FROM root r WHERE r.id=@id", - parameters: [ - { - name: "@id", - value: document.id, - }, - ], - }; - const { resources: results } = await container.items.query(querySpec).fetchAll(); - assert(results.length > 0, "number of results for the query should be > 0"); - const { resources: results2 } = await container.items.query(querySpec).fetchAll(); - assert(results2.length > 0, "number of results for the query should be > 0"); - - // replace document - const replaceDocument: TestItem = { ...dataset.replaceItemDef, id: document.id }; - const { resource: replacedDocument } = await replaceOrUpsertItem( - container, - replaceDocument, - undefined, - isUpsertTest, - dataset.originalItemPartitionKey - ); - assert.equal(replacedDocument.name, 
replaceDocument.name, "document name property should change"); - assert.equal(replacedDocument.foo, replaceDocument.foo, "property should have changed"); - assert.equal(document.id, replacedDocument.id, "document id should stay the same"); - // read document - const response2 = await container - .item(replacedDocument.id, dataset.replacedItemPartitionKey) - .read(); - const document2 = response2.resource; - assert.equal(replacedDocument.id, document2.id); - assert.equal(typeof response2.requestCharge, "number"); - // delete document - await container.item(replacedDocument.id, dataset.replacedItemPartitionKey).delete(); - - // read documents after deletion - const response = await container.item(replacedDocument.id, undefined).read(); - assert.equal(response.statusCode, 404, "response should return error code 404"); - assert.equal(response.resource, undefined); - - // update document } -describe("Item CRUD hierarchical partition", function (this: Suite) { +describe("Item CRUD", function (this: Suite) { + this.timeout(process.env.MOCHA_TIMEOUT || 10000); beforeEach(async function () { await removeAllDatabases(); }); - it("hierarchycal partitions", async function () { - const dbName = "hierarchical partition db"; - const database = await getTestDatabase(dbName); - const containerDef = { - id: "sample container", - partitionKey: { - paths: ["/foo", "/key"], - version: PartitionKeyDefinitionVersion.V2, - kind: PartitionKeyKind.MultiHash, - }, - }; - const itemDefinition: TestItem = { - name: "sample document", - foo: "bar", - key: "value", - replace: "new property", - }; - - const { resource: containerdef } = await database.containers.create(containerDef); + const documentCRUDTest = async function (isUpsertTest: boolean): Promise { + // create database + const database = await getTestDatabase("sample 中文 database"); + // create container + const { resource: containerdef } = await database.containers.create({ id: "sample container" }); const container: Container = database.container(containerdef.id); // read items const { resources: items } = await container.items.readAll().fetchAll(); assert(Array.isArray(items), "Value should be an array"); + // create an item const beforeCreateDocumentsCount = items.length; - + const itemDefinition: TestItem = { + name: "sample document", + foo: "bar", + key: "value", + replace: "new property", + }; + try { + await createOrUpsertItem( + container, + itemDefinition, + { disableAutomaticIdGeneration: true }, + isUpsertTest + ); + assert.fail("id generation disabled must throw with invalid id"); + } catch (err: any) { + assert( + err !== undefined, + "should throw an error because automatic id generation is disabled" + ); + } const { resource: document } = await createOrUpsertItem( container, itemDefinition, undefined, - false + isUpsertTest ); assert.equal(document.name, itemDefinition.name); assert(document.id !== undefined); @@ -239,29 +88,92 @@ describe("Item CRUD hierarchical partition", function (this: Suite) { beforeCreateDocumentsCount + 1, "create should increase the number of documents" ); + // query documents + const querySpec = { + query: "SELECT * FROM root r WHERE r.id=@id", + parameters: [ + { + name: "@id", + value: document.id, + }, + ], + }; + const { resources: results } = await container.items.query(querySpec).fetchAll(); + assert(results.length > 0, "number of results for the query should be > 0"); + const { resources: results2 } = await container.items.query(querySpec).fetchAll(); + assert(results2.length > 0, "number of results for the 
query should be > 0"); + + // replace document + document.name = "replaced document"; + document.foo = "not bar"; + const { resource: replacedDocument } = await replaceOrUpsertItem( + container, + document, + undefined, + isUpsertTest + ); + assert.equal( + replacedDocument.name, + "replaced document", + "document name property should change" + ); + assert.equal(replacedDocument.foo, "not bar", "property should have changed"); + assert.equal(document.id, replacedDocument.id, "document id should stay the same"); + // read document + const response2 = await container.item(replacedDocument.id, undefined).read(); + const document2 = response2.resource; + assert.equal(replacedDocument.id, document2.id); + assert.equal(typeof response2.requestCharge, "number"); + // delete document + await container.item(replacedDocument.id, undefined).delete(); + + // read documents after deletion + const response = await container.item(replacedDocument.id, undefined).read(); + assert.equal(response.statusCode, 404, "response should return error code 404"); + assert.equal(response.resource, undefined); + }; + + it("Should do document CRUD operations successfully", async function () { + await documentCRUDTest(false); }); -}); -describe("Create, Upsert, Read, Update, Replace, Delete Operations on Item", function (this: Suite) { - this.timeout(process.env.MOCHA_TIMEOUT || 10000); - beforeEach(async function () { - await removeAllDatabases(); + it("Should do document CRUD operations successfully with upsert", async function () { + await documentCRUDTest(true); }); - async function multipelPartitionCRUDTest(dataset: MultiCRUDTestDataSet): Promise { - const database = await getTestDatabase(dataset.dbName); - const { resource: containerdef } = await database.containers.create( - { ...dataset.containerDef, partitionKey: dataset.partitinKeyDef }, - dataset.containerRequestOps - ); + it("Should do document CRUD operations over multiple partitions", async function () { + // create database + const database = await getTestDatabase("db1"); + const partitionKey = "key"; + + // create container + const containerDefinition = { + id: "coll1", + partitionKey: { paths: ["/" + partitionKey] }, + }; + + const { resource: containerdef } = await database.containers.create(containerDefinition, { + offerThroughput: 12000, + }); const container = database.container(containerdef.id); - let returnedDocuments = await bulkInsertItems(container, dataset.documents); - assert.equal(returnedDocuments.length, dataset.documents.length); + const documents = [ + { id: "document1" }, + { id: "document2", key: null, prop: 1 }, + { id: "document3", key: false, prop: 1 }, + { id: "document4", key: true, prop: 1 }, + { id: "document5", key: 1, prop: 1 }, + { id: "document6", key: "A", prop: 1 }, + { id: "document7", key: "", prop: 1 }, + ]; + + let returnedDocuments = await bulkInsertItems(container, documents); + + assert.equal(returnedDocuments.length, documents.length); returnedDocuments.sort(function (doc1, doc2) { return doc1.id.localeCompare(doc2.id); }); - await bulkReadItems(container, returnedDocuments, dataset.partitinKeyDef); + await bulkReadItems(container, returnedDocuments, partitionKey); const { resources: successDocuments } = await container.items.readAll().fetchAll(); assert(successDocuments !== undefined, "error reading documents"); assert.equal( @@ -277,21 +189,13 @@ describe("Create, Upsert, Read, Update, Replace, Delete Operations on Item", fun JSON.stringify(returnedDocuments), "Unexpected documents are returned" ); + 
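The restored multi-partition test above inserts documents whose `/key` values span strings, numbers, booleans, `null`, and the empty string, and each value must be passed back when the item is addressed point-wise. A minimal self-contained sketch of that round trip, assuming hypothetical `COSMOS_ENDPOINT`/`COSMOS_KEY` environment variables:

```ts
import { CosmosClient } from "@azure/cosmos";

// Hypothetical environment variable names; any Cosmos DB account works.
const client = new CosmosClient({
  endpoint: process.env.COSMOS_ENDPOINT!,
  key: process.env.COSMOS_KEY!,
});

async function roundTrip(): Promise<void> {
  const { database } = await client.databases.createIfNotExists({ id: "demo-db" });
  const { container } = await database.containers.createIfNotExists({
    id: "demo-container",
    partitionKey: { paths: ["/key"] },
  });

  // Partition key values may be strings, numbers, booleans, or null.
  await container.items.create({ id: "doc5", key: 1, prop: 1 });

  // The same key value must be supplied when the item is read point-wise.
  const { resource } = await container.item("doc5", 1).read();
  console.log(resource?.prop); // 1
}

roundTrip().catch(console.error);
```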
returnedDocuments.forEach(function (document) { document.prop ? ++document.prop : null; // eslint-disable-line no-unused-expressions }); - const newReturnedDocuments = await bulkReplaceItems( - container, - returnedDocuments, - dataset.partitinKeyDef - ); + const newReturnedDocuments = await bulkReplaceItems(container, returnedDocuments, partitionKey); returnedDocuments = newReturnedDocuments; - await bulkQueryItemsWithPartitionKey( - container, - returnedDocuments, - dataset.singleDocFetchQuery, - dataset.parameterGenerator - ); + await bulkQueryItemsWithPartitionKey(container, returnedDocuments, partitionKey); const querySpec = { query: "SELECT * FROM Root", }; @@ -313,1143 +217,520 @@ describe("Create, Upsert, Read, Update, Replace, Delete Operations on Item", fun "Unexpected query results" ); - await bulkDeleteItems(container, returnedDocuments, dataset.partitinKeyDef); - } - const dataSetForDefaultPartitionKey: CRUDTestDataSet = { - containerDef: { id: "sample container" }, - itemDef: { - name: "sample document", - foo: "bar", - key: "value", - replace: "new property", - }, - replaceItemDef: { - name: "replaced document", - foo: "not bar", - key: "value", - replace: "new property", - }, - originalItemPartitionKey: undefined, - replacedItemPartitionKey: undefined, - propertyToCheck: ["name"], - }; - const dataSetForHierarchicalPartitionKey: CRUDTestDataSet = { - containerDef: { - id: "sample container", - partitionKey: { - paths: ["/key", "/key2"], - version: PartitionKeyDefinitionVersion.V2, - kind: PartitionKeyKind.MultiHash, - }, - }, - itemDef: { - name: "sample document", - foo: "bar", - key2: "value2", - key: "value", - replace: "new property", - }, - replaceItemDef: { - name: "replaced document", - foo: "not bar", - key2: "value2", - key: "value", - replace: "new property", - }, - originalItemPartitionKey: ["value", "value2"], - replacedItemPartitionKey: ["value", "value2"], - }; - const multiCrudDataset1: MultiCRUDTestDataSet = { - dbName: "db1", - partitinKeyDef: { - paths: ["/key"], - }, - containerDef: { - id: "col1", - }, - documents: [ - { id: "document1" }, - { id: "document2", key: null, key2: null, prop: 1 }, - { id: "document3", key: false, key2: false, prop: 1 }, - { id: "document4", key: true, key2: true, prop: 1 }, - { id: "document5", key: 1, key2: 1, prop: 1 }, - { id: "document6", key: "A", key2: "A", prop: 1 }, - { id: "document7", key: "", key2: "", prop: 1 }, - ], - containerRequestOps: { - offerThroughput: 12000, - }, - singleDocFetchQuery: "SELECT * FROM root r WHERE r.key=@key", - parameterGenerator: (doc: any) => { - return [ + await bulkDeleteItems(container, returnedDocuments, partitionKey); + }); + + it("Should auto generate an id for a collection partitioned on id", async function () { + // https://github.com/Azure/azure-sdk-for-js/issues/9734 + const container = await getTestContainer("db1", undefined, { partitionKey: "/id" }); + const { resource } = await container.items.create({}); + assert.ok(resource.id); + }); +}); + +describe("bulk/batch item operations", function () { + describe("with v1 container", function () { + let container: Container; + let readItemId: string; + let replaceItemId: string; + let deleteItemId: string; + before(async function () { + container = await getTestContainer("bulk container", undefined, { + partitionKey: { + paths: ["/key"], + version: undefined, + }, + throughput: 25100, + }); + readItemId = addEntropy("item1"); + await container.items.create({ + id: readItemId, + key: "A", + class: "2010", + }); + deleteItemId = 
addEntropy("item2"); + await container.items.create({ + id: deleteItemId, + key: "A", + class: "2010", + }); + replaceItemId = addEntropy("item3"); + await container.items.create({ + id: replaceItemId, + key: 5, + class: "2010", + }); + }); + after(async () => { + await container.database.delete(); + }); + it("handles create, upsert, replace, delete", async function () { + const operations = [ { - name: "@key", - value: doc["key"], + operationType: BulkOperationType.Create, + resourceBody: { id: addEntropy("doc1"), name: "sample", key: "A" }, }, - ]; - }, - }; - const multiCrudDatasetWithHierarchicalPartition: MultiCRUDTestDataSet = { - dbName: "db1", - partitinKeyDef: { - paths: ["/key", "/key2"], - version: PartitionKeyDefinitionVersion.V2, - kind: PartitionKeyKind.MultiHash, - }, - containerDef: { - id: "col1", - }, - documents: [ - { id: "document1" }, - { id: "document2", key: null, key2: null, prop: 1 }, - { id: "document3", key: false, key2: false, prop: 1 }, - { id: "document4", key: true, key2: true, prop: 1 }, - { id: "document5", key: 1, key2: 1, prop: 1 }, - { id: "document6", key: "A", key2: "A", prop: 1 }, - { id: "document7", key: "", key2: "", prop: 1 }, - ], - containerRequestOps: { - offerThroughput: 12000, - }, - singleDocFetchQuery: "SELECT * FROM root r WHERE r.key=@key and r.key2=@key2", - parameterGenerator: (doc: any) => { - return [ { - name: "@key", - value: doc["key"], + operationType: BulkOperationType.Upsert, + partitionKey: "A", + resourceBody: { id: addEntropy("doc2"), name: "other", key: "A" }, }, { - name: "@key2", - value: doc["key2"], + operationType: BulkOperationType.Read, + id: readItemId, + partitionKey: "A", + }, + { + operationType: BulkOperationType.Delete, + id: deleteItemId, + partitionKey: "A", + }, + { + operationType: BulkOperationType.Replace, + partitionKey: 5, + id: replaceItemId, + resourceBody: { id: replaceItemId, name: "nice", key: 5 }, }, ]; - }, - }; - - describe("V1 Container", async () => { - describe("Single Partition Container", async () => { - it("Should do document CRUD operations successfully : container with default partition key", async function () { - await CRUDTestRunner(dataSetForDefaultPartitionKey, false); - }); - }); - describe("Multi Partition Container", async () => { - it("Should do document CRUD operations successfully with upsert : container with default partition key", async function () { - await CRUDTestRunner(dataSetForDefaultPartitionKey, true); - }); + const response = await container.items.bulk(operations); + // Create + assert.equal(response[0].resourceBody.name, "sample"); + assert.equal(response[0].statusCode, 201); + // Upsert + assert.equal(response[1].resourceBody.name, "other"); + assert.equal(response[1].statusCode, 201); + // Read + assert.equal(response[2].resourceBody.class, "2010"); + assert.equal(response[2].statusCode, 200); + // Delete + assert.equal(response[3].statusCode, 204); + // Replace + assert.equal(response[4].resourceBody.name, "nice"); + assert.equal(response[4].statusCode, 200); }); }); - - describe("V2 Container", async () => { - describe("Multi Partition Container", async () => { - it("Should do document CRUD operations successfully : container with hierarchical partition key", async function () { - await CRUDTestRunner(dataSetForHierarchicalPartitionKey, false); + describe("with v2 container", function () { + let v2Container: Container; + let readItemId: string; + let replaceItemId: string; + let patchItemId: string; + let deleteItemId: string; + before(async function () { + 
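For reference, the v1 bulk test above passes a heterogeneous array of `OperationInput` objects to `container.items.bulk`, with the partition key supplied per operation. A minimal sketch of that call shape (the helper and item names are illustrative):

```ts
import { BulkOperationType, Container, OperationInput } from "@azure/cosmos";

// Illustrative helper: create an item and read it back in one bulk payload.
async function bulkCreateAndRead(container: Container, id: string): Promise<void> {
  const operations: OperationInput[] = [
    {
      operationType: BulkOperationType.Create,
      resourceBody: { id, key: "A", name: "sample" },
    },
    {
      // Each operation carries its own partition key, so a single payload
      // can touch several partitions.
      operationType: BulkOperationType.Read,
      id,
      partitionKey: "A",
    },
  ];
  const response = await container.items.bulk(operations);
  // Every entry reports its own status code, as the assertions above check.
  console.log(response.map((r) => r.statusCode)); // e.g. [201, 200]
}
```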
const client = new CosmosClient({ key: masterKey, endpoint }); + const db = await client.databases.createIfNotExists({ id: "patchDb" }); + const database = db.database; + const response = await database.containers.createIfNotExists({ + id: "patchContainer", + partitionKey: { + paths: ["/key"], + version: 2, + }, + throughput: 25100, }); - it("Should do document CRUD operations successfully with upsert : container with hierarchical partition key", async function () { - await CRUDTestRunner(dataSetForHierarchicalPartitionKey, true); + v2Container = response.container; + readItemId = addEntropy("item1"); + await v2Container.items.create({ + id: readItemId, + key: true, + class: "2010", + }); + deleteItemId = addEntropy("item2"); + await v2Container.items.create({ + id: deleteItemId, + key: {}, + class: "2011", + }); + replaceItemId = addEntropy("item3"); + await v2Container.items.create({ + id: replaceItemId, + key: 5, + class: "2012", + }); + patchItemId = addEntropy("item4"); + await v2Container.items.create({ + id: patchItemId, + key: 5, + class: "2019", }); }); - }); - - it("Document CRUD over multiple partition: Single partition key", async function () { - await multipelPartitionCRUDTest(multiCrudDataset1); - }); - - it("Document CRUD over multiple partition : Hierarchical partitions", async function () { - await multipelPartitionCRUDTest(multiCrudDatasetWithHierarchicalPartition); - }); - - it("Should auto generate an id for a collection partitioned on id", async function () { - // https://github.com/Azure/azure-sdk-for-js/issues/9734 - const container = await getTestContainer("db1", undefined, { partitionKey: "/id" }); - const { resource } = await container.items.create({}); - assert.ok(resource.id); - }); -}); -// TODO: Non-deterministic test. 
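As the TODO here notes, throttling retries happen inside the SDK, which is what makes the item-read retry test non-deterministic: the caller only ever observes the final status code. The retry budget itself is tunable through the client's connection policy; a minimal sketch with illustrative values (the endpoint/key variable names are hypothetical):

```ts
import { CosmosClient } from "@azure/cosmos";

// Illustrative retry budget; endpoint/key env var names are hypothetical.
const throttledClient = new CosmosClient({
  endpoint: process.env.COSMOS_ENDPOINT!,
  key: process.env.COSMOS_KEY!,
  connectionPolicy: {
    retryOptions: {
      maxRetryAttemptCount: 9, // retries per throttled (429) request
      fixedRetryIntervalInMilliseconds: 0, // 0 = honor the server's retry-after hint
      maxWaitTimeInSeconds: 30, // overall wait budget across retries
    },
  },
});
```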
We can't guarantee we see any response with a 429 status code since the retries happen within the response -describe("item read retries", async function () { - it("retries on 429", async function () { - const client = new CosmosClient({ key: masterKey, endpoint }); - const { resource: db } = await client.databases.create({ - id: `small db ${Math.random() * 1000}`, - }); - const containerResponse = await client - .database(db.id) - .containers.create({ id: `small container ${Math.random() * 1000}`, throughput: 400 }); - const container = containerResponse.container; - await container.items.create({ id: "readme" }); - const arr = new Array(400); - const promises = []; - for (let i = 0; i < arr.length; i++) { - promises.push(container.item("readme").read()); - } - const resp = await Promise.all(promises); - assert.equal(resp[0].statusCode, 200); - }); -}); - -describe("bulk/batch item operations", async function () { - describe("test bulk operations", async function () { - describe("v1 multi partition container", async function () { - let container: Container; - let readItemId: string; - let replaceItemId: string; - let deleteItemId: string; - before(async function () { - container = await getTestContainer("bulk container", undefined, { - partitionKey: { - paths: ["/key"], - version: undefined, - }, - throughput: 25100, - }); - readItemId = addEntropy("item1"); - await container.items.create({ + it("handles create, upsert, patch, replace, delete", async function () { + const operations = [ + { + operationType: BulkOperationType.Create, + partitionKey: "A", + resourceBody: { id: addEntropy("doc1"), name: "sample", key: "A" }, + }, + { + operationType: BulkOperationType.Upsert, + partitionKey: "U", + resourceBody: { name: "other", key: "U" }, + }, + { + operationType: BulkOperationType.Read, id: readItemId, - key: "A", - class: "2010", - }); - deleteItemId = addEntropy("item2"); - await container.items.create({ + partitionKey: true, + }, + { + operationType: BulkOperationType.Delete, id: deleteItemId, - key: "A", - class: "2010", - }); - replaceItemId = addEntropy("item3"); - await container.items.create({ + partitionKey: {}, + }, + { + operationType: BulkOperationType.Replace, id: replaceItemId, - key: 5, - class: "2010", - }); - }); - after(async () => { - await container.database.delete(); + resourceBody: { id: replaceItemId, name: "nice", key: 5 }, + }, + { + operationType: BulkOperationType.Patch, + partitionKey: 5, + id: patchItemId, + resourceBody: { + operations: [{ op: PatchOperationType.add, path: "/great", value: "goodValue" }], + }, + }, + { + operationType: BulkOperationType.Patch, + partitionKey: 5, + id: patchItemId, + resourceBody: { + operations: [{ op: PatchOperationType.add, path: "/good", value: "greatValue" }], + condition: "from c where NOT IS_DEFINED(c.newImproved)", + }, + }, + ]; + const response = await v2Container.items.bulk(operations); + // Create + assert.strictEqual(response[0].resourceBody.name, "sample"); + assert.strictEqual(response[0].statusCode, 201); + // Upsert + assert.strictEqual(response[1].resourceBody.name, "other"); + assert.strictEqual(response[1].statusCode, 201); + // Read + assert.strictEqual(response[2].resourceBody.class, "2010"); + assert.strictEqual(response[2].statusCode, 200); + // Delete + assert.strictEqual(response[3].statusCode, 204); + // Replace + assert.strictEqual(response[4].resourceBody.name, "nice"); + assert.strictEqual(response[4].statusCode, 200); + // Patch + assert.strictEqual(response[5].resourceBody.great, "goodValue"); + 
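The two Patch entries above show the bulk patch payload shape: `resourceBody` holds the JSON-patch style `operations` array plus an optional `condition` filter. A minimal standalone sketch of a conditional bulk patch (names and values are illustrative):

```ts
import {
  BulkOperationType,
  Container,
  OperationInput,
  PatchOperationType,
} from "@azure/cosmos";

// Illustrative conditional patch driven through items.bulk.
async function patchViaBulk(container: Container, id: string, pk: number): Promise<void> {
  const operations: OperationInput[] = [
    {
      operationType: BulkOperationType.Patch,
      partitionKey: pk,
      id,
      resourceBody: {
        // Applied only while the (hypothetical) flag is still absent.
        operations: [{ op: PatchOperationType.add, path: "/great", value: "goodValue" }],
        condition: "from c where NOT IS_DEFINED(c.newImproved)",
      },
    },
  ];
  const [result] = await container.items.bulk(operations);
  console.log(result.statusCode); // 200 when the condition matches and the patch applies
}
```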
assert.strictEqual(response[5].statusCode, 200); + }); + it("respects order", async function () { + readItemId = addEntropy("item1"); + await v2Container.items.create({ + id: readItemId, + key: "A", + class: "2010", }); - it("handles create, upsert, replace, delete", async function () { - const operations = [ - { - operationType: BulkOperationType.Create, - resourceBody: { id: addEntropy("doc1"), name: "sample", key: "A" }, + const operations = [ + { + operationType: BulkOperationType.Delete, + id: readItemId, + partitionKey: "A", + }, + { + operationType: BulkOperationType.Read, + id: readItemId, + partitionKey: "A", + }, + ]; + const response = await v2Container.items.bulk(operations); + assert.equal(response[0].statusCode, 204); + // Delete occurs first, so the read returns a 404 + assert.equal(response[1].statusCode, 404); + }); + it("424 errors for operations after an error", async function () { + const operations = [ + { + operationType: BulkOperationType.Create, + resourceBody: { + ttl: -10, + key: "A", }, - { - operationType: BulkOperationType.Upsert, - partitionKey: "A", - resourceBody: { id: addEntropy("doc2"), name: "other", key: "A" }, + }, + { + operationType: BulkOperationType.Create, + resourceBody: { + key: "A", + licenseType: "B", + id: "o239uroihndsf", }, - { - operationType: BulkOperationType.Read, - id: readItemId, - partitionKey: "A", + }, + ]; + const response = await v2Container.items.bulk(operations); + assert.equal(response[1].statusCode, 424); + }); + it("Continues after errors with continueOnError true", async function () { + const operations = [ + { + operationType: BulkOperationType.Create, + resourceBody: { + ttl: -10, + key: "A", }, - { - operationType: BulkOperationType.Delete, - id: deleteItemId, - partitionKey: "A", + }, + { + operationType: BulkOperationType.Create, + resourceBody: { + key: "A", + licenseType: "B", + id: addEntropy("sifjsiof"), }, - { - operationType: BulkOperationType.Replace, - partitionKey: 5, - id: replaceItemId, - resourceBody: { id: replaceItemId, name: "nice", key: 5 }, + }, + ]; + const response = await v2Container.items.bulk(operations, { continueOnError: true }); + assert.strictEqual(response[1].statusCode, 201); + }); + it("autogenerates IDs for Create operations", async function () { + const operations = [ + { + operationType: BulkOperationType.Create, + resourceBody: { + key: "A", + licenseType: "C", }, - ]; - const response = await container.items.bulk(operations); - // Create - assert.equal(response[0].resourceBody.name, "sample"); - assert.equal(response[0].statusCode, 201); - // Upsert - assert.equal(response[1].resourceBody.name, "other"); - assert.equal(response[1].statusCode, 201); - // Read - assert.equal(response[2].resourceBody.class, "2010"); - assert.equal(response[2].statusCode, 200); - // Delete - assert.equal(response[3].statusCode, 204); - // Replace - assert.equal(response[4].resourceBody.name, "nice"); - assert.equal(response[4].statusCode, 200); + }, + ]; + const response = await v2Container.items.bulk(operations); + assert.equal(response[0].statusCode, 201); + }); + it("handles operations with null, undefined, and 0 partition keys", async function () { + const item1Id = addEntropy("item1"); + const item2Id = addEntropy("item2"); + const item3Id = addEntropy("item2"); + await v2Container.items.create({ + id: item1Id, + key: null, + class: "2010", }); + await v2Container.items.create({ + id: item2Id, + key: 0, + }); + await v2Container.items.create({ + id: item3Id, + key: undefined, + }); + const operations: 
OperationInput[] = [ + { + operationType: BulkOperationType.Read, + id: item1Id, + partitionKey: null, + }, + { + operationType: BulkOperationType.Read, + id: item2Id, + partitionKey: 0, + }, + { + operationType: BulkOperationType.Read, + id: item3Id, + partitionKey: undefined, + }, + ]; + const response = await v2Container.items.bulk(operations); + assert.equal(response[0].statusCode, 200); + assert.equal(response[1].statusCode, 200); + assert.equal(response[2].statusCode, 200); }); - describe("v2 container", function () { - describe("multi partition container", async function () { - let readItemId: string; - let replaceItemId: string; - let patchItemId: string; - let deleteItemId: string; - type BulkTestItem = { - id: string; - key: any; - key2?: any; - key3?: any; - class?: string; - }; - type BulkTestDataSet = { - dbName: string; - containerRequest: ContainerRequest; - documentToCreate: BulkTestItem[]; - bulkOperationOptions: BulkOptions; - operations: { - description?: string; - operation: OperationInput; - expectedOutput?: { - description?: string; - statusCode: number; - propertysToMatch: { - name: string; - value: any; - }[]; - }; - }[]; - }; - const defaultBulkTestDataSet: BulkTestDataSet = { - dbName: "bulkTestDB", - bulkOperationOptions: { - continueOnError: false, - }, - containerRequest: { - id: "patchContainer", - partitionKey: { - paths: ["/key"], - version: 2, + }); + describe("v2 single partition container", async function () { + let container: Container; + let deleteItemId: string; + before(async function () { + container = await getTestContainer("bulk container"); + deleteItemId = addEntropy("item2"); + await container.items.create({ + id: deleteItemId, + key: "A", + class: "2010", + }); + }); + it("deletes an item with default partition", async function () { + const operation: OperationInput = { + operationType: BulkOperationType.Delete, + id: deleteItemId, + }; + + const deleteResponse = await container.items.bulk([operation]); + assert.equal(deleteResponse[0].statusCode, 204); + }); + }); + describe("v2 multi partition container", async function () { + let container: Container; + let createItemId: string; + let upsertItemId: string; + before(async function () { + container = await getTestContainer("bulk container", undefined, { + partitionKey: { + paths: ["/nested/key"], + version: 2, + }, + throughput: 25100, + }); + createItemId = addEntropy("createItem"); + upsertItemId = addEntropy("upsertItem"); + }); + it("creates an item with nested object partition key", async function () { + const operations: OperationInput[] = [ + { + operationType: BulkOperationType.Create, + resourceBody: { + id: createItemId, + nested: { + key: "A", }, - throughput: 25100, }, - documentToCreate: [], - operations: [], - }; - async function runBulkTestDataSet(dataset: BulkTestDataSet) { - const client = new CosmosClient({ key: masterKey, endpoint }); - const db = await client.databases.createIfNotExists({ id: dataset.dbName }); - const database = db.database; - const { container } = await database.containers.createIfNotExists( - dataset.containerRequest - ); - try { - for (const doc of dataset.documentToCreate) { - await container.items.create(doc); - } - const response = await container.items.bulk( - dataset.operations.map((value) => value.operation), - dataset.bulkOperationOptions - ); - dataset.operations.forEach(({ description, expectedOutput }, index) => { - if (expectedOutput) { - assert.strictEqual( - response[index].statusCode, - expectedOutput.statusCode, - `Failed during - 
${description}` - ); - expectedOutput.propertysToMatch.forEach(({ name, value }) => { - assert.strictEqual( - response[index].resourceBody[name], - value, - `Failed during - ${description}` - ); - }); - } - }); - } finally { - await database.delete(); - } - } - function createBulkOperation( - operationType: any, - partitionKeySpecifier?: { partitionKey?: PartitionKey }, - resourceBody?: any, - id?: string - ): OperationInput { - let op: OperationInput = { - operationType, - resourceBody, - ...partitionKeySpecifier, - }; - if (resourceBody !== undefined) op = { ...op, resourceBody }; - if (id !== undefined) op = { ...op, id } as any; - return op; - } - function creatreBulkOperationExpectedOutput( - statusCode: number, - propertysToMatch: { name: string; value: any }[] - ): { - statusCode: number; - propertysToMatch: { - name: string; - value: any; - }[]; - } { - return { - statusCode, - propertysToMatch, - }; - } - describe("handles create, upsert, patch, replace, delete", async function () { - it("Hierarchical Partitions with two keys", async function () { - readItemId = addEntropy("item1"); - const createItemWithBooleanPartitionKeyId = addEntropy( - "createItemWithBooleanPartitionKeyId" - ); - const createItemWithStringPartitionKeyId = addEntropy( - "createItemWithStringPartitionKeyId" - ); - const createItemWithUnknownPartitionKeyId = addEntropy( - "createItemWithUnknownPartitionKeyId" - ); - const createItemWithNumberPartitionKeyId = addEntropy( - "createItemWithNumberPartitionKeyId" - ); - replaceItemId = addEntropy("item3"); - patchItemId = addEntropy("item4"); - deleteItemId = addEntropy("item2"); - const dataset: BulkTestDataSet = { - dbName: "hierarchical partition bulk", - containerRequest: { - id: "patchContainer", - partitionKey: { - paths: ["/key", "/key2"], - version: PartitionKeyDefinitionVersion.V2, - kind: PartitionKeyKind.MultiHash, - }, - throughput: 25100, - }, - bulkOperationOptions: { - continueOnError: false, - }, - documentToCreate: [ - { id: readItemId, key: true, key2: true, class: "2010" }, - { id: createItemWithBooleanPartitionKeyId, key: true, key2: false, class: "2010" }, - { id: createItemWithUnknownPartitionKeyId, key: {}, key2: {}, class: "2010" }, - { id: createItemWithNumberPartitionKeyId, key: 0, key2: 3, class: "2010" }, - { id: createItemWithStringPartitionKeyId, key: 5, key2: {}, class: "2010" }, - { id: deleteItemId, key: {}, key2: {}, class: "2011" }, - { id: replaceItemId, key: 5, key2: 5, class: "2012" }, - { id: patchItemId, key: 5, key2: 5, class: "2019" }, - ], - operations: [ - { - description: "Read document with partitionKey containing booleans values.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [true, false] }, - undefined, - createItemWithBooleanPartitionKeyId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: "Read document with partitionKey containing unknown values.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [{}, {}] }, - undefined, - createItemWithUnknownPartitionKeyId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: - "Creating operation's partitionKey to undefined value should fail since internally it would map to [{},{}].", - operation: createBulkOperation( - BulkOperationType.Create, - { partitionKey: undefined }, - { id: addEntropy("doc10"), name: "sample", key: "A", key2: "B" } - ), - expectedOutput: 
creatreBulkOperationExpectedOutput(400, []), - }, - { - description: "Read document with partitionKey containing Number values.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [0, 3] }, - undefined, - createItemWithNumberPartitionKeyId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: "Creating document with partitionKey containing 2 strings.", - operation: createBulkOperation( - BulkOperationType.Create, - { partitionKey: ["A", "B"] }, - { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(201, [ - { name: "name", value: "sample" }, - ]), - }, - { - description: "Creating document with mismatching partition key.", - operation: createBulkOperation( - BulkOperationType.Create, - { partitionKey: ["A", "V"] }, - { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(400, []), - }, - { - description: "Upsert document with partitionKey containing 2 strings.", - operation: createBulkOperation( - BulkOperationType.Upsert, - { partitionKey: ["U", "V"] }, - { name: "other", key: "U", key2: "V" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(201, [ - { name: "name", value: "other" }, - ]), - }, - { - description: "Read document with partitionKey containing 2 booleans.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [true, true] }, - undefined, - readItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: - "Delete document with partitionKey containing 2 undefined partition keys.", - operation: createBulkOperation( - BulkOperationType.Delete, - { partitionKey: [{}, {}] }, - undefined, - deleteItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(204, []), - }, - { - description: "Replace document without specifying partition key.", - operation: createBulkOperation( - BulkOperationType.Replace, - {}, - { id: replaceItemId, name: "nice", key: 5, key2: 5 }, - replaceItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "name", value: "nice" }, - ]), - }, - { - description: "Patch document with partitionKey containing 2 Numbers.", - operation: createBulkOperation( - BulkOperationType.Patch, - { partitionKey: [5, 5] }, - { - operations: [ - { op: PatchOperationType.add, path: "/great", value: "goodValue" }, - ], - }, - patchItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "great", value: "goodValue" }, - ]), - }, - { - description: "Conditional Patch document with partitionKey containing 2 Numbers.", - operation: createBulkOperation( - BulkOperationType.Patch, - { partitionKey: [5, 5] }, - { - operations: [ - { op: PatchOperationType.add, path: "/good", value: "greatValue" }, - ], - condition: "from c where NOT IS_DEFINED(c.newImproved)", - }, - patchItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, []), - }, - ], - }; - await runBulkTestDataSet(dataset); - }); - it("Hierarchical Partitions with three keys", async function () { - readItemId = addEntropy("item1"); - const createItemWithBooleanPartitionKeyId = addEntropy( - "createItemWithBooleanPartitionKeyId" - ); - const createItemWithStringPartitionKeyId = addEntropy( - "createItemWithStringPartitionKeyId" - ); - const createItemWithUnknownPartitionKeyId = addEntropy( - 
"createItemWithUnknownPartitionKeyId" - ); - const createItemWithNumberPartitionKeyId = addEntropy( - "createItemWithNumberPartitionKeyId" - ); - replaceItemId = addEntropy("item3"); - patchItemId = addEntropy("item4"); - deleteItemId = addEntropy("item2"); - const dataset: BulkTestDataSet = { - dbName: "hierarchical partition bulk", - containerRequest: { - id: "patchContainer", - partitionKey: { - paths: ["/key", "/key2", "/key3"], - version: PartitionKeyDefinitionVersion.V2, - kind: PartitionKeyKind.MultiHash, - }, - throughput: 25100, - }, - documentToCreate: [ - { id: readItemId, key: true, key2: true, key3: true, class: "2010" }, - { - id: createItemWithBooleanPartitionKeyId, - key: true, - key2: false, - key3: true, - class: "2010", - }, - { - id: createItemWithUnknownPartitionKeyId, - key: {}, - key2: {}, - key3: {}, - class: "2010", - }, - { id: createItemWithNumberPartitionKeyId, key: 0, key2: 3, key3: 5, class: "2010" }, - { - id: createItemWithStringPartitionKeyId, - key: 5, - key2: {}, - key3: "adsf", - class: "2010", - }, - { id: deleteItemId, key: {}, key2: {}, key3: {}, class: "2011" }, - { id: replaceItemId, key: 5, key2: 5, key3: "T", class: "2012" }, - { id: patchItemId, key: 5, key2: 5, key3: true, class: "2019" }, - ], - bulkOperationOptions: {}, - operations: [ - { - description: "Read document with partitionKey containing booleans values.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [true, false, true] }, - undefined, - createItemWithBooleanPartitionKeyId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: "Read document with partitionKey containing unknown values.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [{}, {}, {}] }, - undefined, - createItemWithUnknownPartitionKeyId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: "Read document with partitionKey containing Number values.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [0, 3, 5] }, - undefined, - createItemWithNumberPartitionKeyId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: "Creating document with partitionKey containing 2 strings.", - operation: createBulkOperation( - BulkOperationType.Create, - { partitionKey: ["A", "B", "C"] }, - { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B", key3: "C" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(201, [ - { name: "name", value: "sample" }, - ]), - }, - { - description: "Creating document with mismatching partition key.", - operation: createBulkOperation( - BulkOperationType.Create, - { partitionKey: ["A", "V", true] }, - { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B", key3: true } - ), - expectedOutput: creatreBulkOperationExpectedOutput(400, []), - }, - { - description: "Upsert document with partitionKey containing 2 strings.", - operation: createBulkOperation( - BulkOperationType.Upsert, - { partitionKey: ["U", "V", 5] }, - { name: "other", key: "U", key2: "V", key3: 5 } - ), - expectedOutput: creatreBulkOperationExpectedOutput(201, [ - { name: "name", value: "other" }, - ]), - }, - { - description: "Read document with partitionKey containing 2 booleans.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: [true, true, true] }, - undefined, - readItemId - 
), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "class", value: "2010" }, - ]), - }, - { - description: - "Delete document with partitionKey containing 2 undefined partition keys.", - operation: createBulkOperation( - BulkOperationType.Delete, - { partitionKey: [{}, {}, {}] }, - undefined, - deleteItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(204, []), - }, - { - description: "Replace document without specifying partition key.", - operation: createBulkOperation( - BulkOperationType.Replace, - {}, - { id: replaceItemId, name: "nice", key: 5, key2: 5, key3: "T" }, - replaceItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "name", value: "nice" }, - ]), - }, - { - description: "Patch document with partitionKey containing 2 Numbers.", - operation: createBulkOperation( - BulkOperationType.Patch, - { partitionKey: [5, 5, true] }, - { - operations: [ - { op: PatchOperationType.add, path: "/great", value: "goodValue" }, - ], - }, - patchItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, [ - { name: "great", value: "goodValue" }, - ]), - }, - { - description: "Conditional Patch document with partitionKey containing 2 Numbers.", - operation: createBulkOperation( - BulkOperationType.Patch, - { partitionKey: [5, 5, true] }, - { - operations: [ - { op: PatchOperationType.add, path: "/good", value: "greatValue" }, - ], - condition: "from c where NOT IS_DEFINED(c.newImproved)", - }, - patchItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, []), - }, - ], - }; - await runBulkTestDataSet(dataset); - }); - }); - it("respects order", async function () { - readItemId = addEntropy("item1"); - const dataset: BulkTestDataSet = { - ...defaultBulkTestDataSet, - documentToCreate: [{ id: readItemId, key: "A", class: "2010" }], - operations: [ - { - description: "Delete for an existing item should suceed.", - operation: createBulkOperation( - BulkOperationType.Delete, - { partitionKey: "A" }, - undefined, - readItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(204, []), - }, - { - description: "Delete occurs first, so the read returns a 404.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: "A" }, - undefined, - readItemId - ), - expectedOutput: creatreBulkOperationExpectedOutput(404, []), - }, - ], - }; - runBulkTestDataSet(dataset); - }); - it("424 errors for operations after an error", async function () { - const dataset: BulkTestDataSet = { - ...defaultBulkTestDataSet, - documentToCreate: [], - operations: [ - { - description: "Operation should fail with invalid ttl.", - operation: createBulkOperation( - BulkOperationType.Create, - {}, - { ttl: -10, key: "A" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(400, []), - }, - { - description: "", - operation: createBulkOperation( - BulkOperationType.Create, - { partitionKey: "A" }, - { key: "A", licenseType: "B", id: "o239uroihndsf" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(424, []), - }, - ], - }; - runBulkTestDataSet(dataset); - }); - it("Continues after errors with continueOnError true", async function () { - const dataset: BulkTestDataSet = { - ...defaultBulkTestDataSet, - documentToCreate: [], - bulkOperationOptions: { - continueOnError: true, + }, + { + operationType: BulkOperationType.Upsert, + resourceBody: { + id: upsertItemId, + nested: { + key: false, }, - operations: [ - { - description: "Operation should fail with invalid ttl.", - operation: createBulkOperation( - 
BulkOperationType.Create, - {}, - { ttl: -10, key: "A" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(400, []), - }, - { - description: - "Operation should suceed and should not be abondoned because of previous failure, since continueOnError is true.", - operation: createBulkOperation( - BulkOperationType.Create, - {}, - { key: "A", licenseType: "B", id: addEntropy("sifjsiof") } - ), - expectedOutput: creatreBulkOperationExpectedOutput(201, []), - }, - ], - }; - runBulkTestDataSet(dataset); - // const operations = [ - // { - // operationType: BulkOperationType.Create, - // resourceBody: { - // ttl: -10, - // key: "A", - // }, - // }, - // { - // operationType: BulkOperationType.Create, - // resourceBody: { - // key: "A", - // licenseType: "B", - // id: addEntropy("sifjsiof"), - // }, - // }, - // ]; - // const response = await v2Container.items.bulk(operations, { continueOnError: true }); - // assert.strictEqual(response[1].statusCode, 201); - }); - it("autogenerates IDs for Create operations", async function () { - const dataset: BulkTestDataSet = { - ...defaultBulkTestDataSet, - operations: [ - { - description: "Operation should fail with invalid ttl.", - operation: createBulkOperation( - BulkOperationType.Create, - {}, - { key: "A", licenseType: "C" } - ), - expectedOutput: creatreBulkOperationExpectedOutput(201, []), - }, - ], - }; - runBulkTestDataSet(dataset); - }); - it("handles operations with null, undefined, and 0 partition keys", async function () { - const item1Id = addEntropy("item1"); - const item2Id = addEntropy("item2"); - const item3Id = addEntropy("item2"); - const dataset: BulkTestDataSet = { - ...defaultBulkTestDataSet, - documentToCreate: [ - { id: item1Id, key: null, class: "2010" }, - { id: item2Id, key: 0 }, - { id: item3Id, key: undefined }, - ], - operations: [ - { - description: "Read document with null partition key should suceed.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: null }, - {}, - item1Id - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, []), - }, - { - description: "Read document with 0 partition key should suceed.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: 0 }, - {}, - item1Id - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, []), - }, - { - description: "Read document with undefined partition key should suceed.", - operation: createBulkOperation( - BulkOperationType.Read, - { partitionKey: undefined }, - {}, - item1Id - ), - expectedOutput: creatreBulkOperationExpectedOutput(200, []), - }, - ], - }; - runBulkTestDataSet(dataset); + }, + }, + ]; - // await v2Container.items.create({ - // id: item1Id, - // key: null, - // class: "2010", - // }); - // await v2Container.items.create({ - // id: item2Id, - // key: 0, - // }); - // await v2Container.items.create({ - // id: item3Id, - // key: undefined, - // }); - // const operations: OperationInput[] = [ - // { - // operationType: BulkOperationType.Read, - // id: item1Id, - // partitionKey: null, - // }, - // { - // operationType: BulkOperationType.Read, - // id: item2Id, - // partitionKey: 0, - // }, - // { - // operationType: BulkOperationType.Read, - // id: item3Id, - // partitionKey: undefined, - // }, - // ]; - // const response = await v2Container.items.bulk(operations); - // assert.equal(response[0].statusCode, 200); - // assert.equal(response[1].statusCode, 200); - // assert.equal(response[2].statusCode, 200); - }); - }); - describe("multi partition container - nested partition key", async 
function () { - let container: Container; - let createItemId: string; - let upsertItemId: string; - before(async function () { - container = await getTestContainer("bulk container", undefined, { - partitionKey: { - paths: ["/nested/key"], - version: 2, - }, - throughput: 25100, - }); - createItemId = addEntropy("createItem"); - upsertItemId = addEntropy("upsertItem"); - }); - it("creates an item with nested object partition key", async function () { - const operations: OperationInput[] = [ - { - operationType: BulkOperationType.Create, - resourceBody: { - id: createItemId, - nested: { - key: "A", - }, - }, - }, - { - operationType: BulkOperationType.Upsert, - resourceBody: { - id: upsertItemId, - nested: { - key: false, - }, - }, - }, - ]; + const createResponse = await container.items.bulk(operations); + assert.equal(createResponse[0].statusCode, 201); + }); + }); - const createResponse = await container.items.bulk(operations); - assert.equal(createResponse[0].statusCode, 201); - }); + // TODO: Non-deterministic test. We can't guarantee we see any response with a 429 status code since the retries happen within the response + describe("item read retries", async function () { + it("retries on 429", async function () { + const client = new CosmosClient({ key: masterKey, endpoint }); + const { resource: db } = await client.databases.create({ + id: `small db ${Math.random() * 1000}`, }); - describe("single partition container", async function () { - let container: Container; - let deleteItemId: string; - before(async function () { - container = await getTestContainer("bulk container"); - deleteItemId = addEntropy("item2"); - await container.items.create({ - id: deleteItemId, - key: "A", - class: "2010", - }); - }); - it("deletes an item with default partition", async function () { - const operation: OperationInput = { - operationType: BulkOperationType.Delete, - id: deleteItemId, - }; + const containerResponse = await client + .database(db.id) + .containers.create({ id: `small container ${Math.random() * 1000}`, throughput: 400 }); + const container = containerResponse.container; + await container.items.create({ id: "readme" }); + const arr = new Array(400); + const promises = []; + for (let i = 0; i < arr.length; i++) { + promises.push(container.item("readme").read()); + } + const resp = await Promise.all(promises); + assert.equal(resp[0].statusCode, 200); + }); + }); - const deleteResponse = await container.items.bulk([operation]); - assert.equal(deleteResponse[0].statusCode, 204); - }); + describe("v2 single partition container", async function () { + let container: Container; + let createItemId: string; + let otherItemId: string; + let upsertItemId: string; + let replaceItemId: string; + let deleteItemId: string; + let patchItemId: string; + before(async function () { + const client = new CosmosClient({ key: masterKey, endpoint }); + const db = await client.databases.createIfNotExists({ id: "patchDb" }); + const contResponse = await db.database.containers.createIfNotExists({ + id: "patchContainer", + partitionKey: { + paths: ["/key"], + version: 2, + }, + throughput: 25100, + }); + container = contResponse.container; + deleteItemId = addEntropy("item1"); + createItemId = addEntropy("item2"); + otherItemId = addEntropy("item2"); + upsertItemId = addEntropy("item4"); + replaceItemId = addEntropy("item3"); + patchItemId = addEntropy("item5"); + await container.items.create({ + id: deleteItemId, + key: "A", + class: "2010", + }); + await container.items.create({ + id: replaceItemId, + key: 
"A", + class: "2010", + }); + await container.items.create({ + id: patchItemId, + key: "A", + class: "2010", }); }); - }); - describe("test batch operations", function () { - describe("v2 multi partition container", async function () { - let container: Container; - let createItemId: string; - let otherItemId: string; - let upsertItemId: string; - let replaceItemId: string; - let deleteItemId: string; - let patchItemId: string; - before(async function () { - const client = new CosmosClient({ key: masterKey, endpoint }); - const db = await client.databases.createIfNotExists({ id: "patchDb" }); - const contResponse = await db.database.containers.createIfNotExists({ - id: "patchContainer", - partitionKey: { - paths: ["/key"], - version: 2, - }, - throughput: 25100, - }); - container = contResponse.container; - deleteItemId = addEntropy("item1"); - createItemId = addEntropy("item2"); - otherItemId = addEntropy("item2"); - upsertItemId = addEntropy("item4"); - replaceItemId = addEntropy("item3"); - patchItemId = addEntropy("item5"); - await container.items.create({ - id: deleteItemId, - key: "A", - class: "2010", - }); - await container.items.create({ + it("can batch all operation types", async function () { + const operations: OperationInput[] = [ + { + operationType: BulkOperationType.Create, + resourceBody: { id: createItemId, key: "A", school: "high" }, + }, + { + operationType: BulkOperationType.Upsert, + resourceBody: { id: upsertItemId, key: "A", school: "elementary" }, + }, + { + operationType: BulkOperationType.Replace, id: replaceItemId, - key: "A", - class: "2010", - }); - await container.items.create({ + resourceBody: { id: replaceItemId, key: "A", school: "junior high" }, + }, + { + operationType: BulkOperationType.Delete, + id: deleteItemId, + }, + { + operationType: BulkOperationType.Patch, id: patchItemId, - key: "A", - class: "2010", - }); - }); - it("can batch all operation types", async function () { - const operations: OperationInput[] = [ - { - operationType: BulkOperationType.Create, - resourceBody: { id: createItemId, key: "A", school: "high" }, + resourceBody: { + operations: [{ op: PatchOperationType.add, path: "/good", value: "greatValue" }], + condition: "from c where NOT IS_DEFINED(c.newImproved)", }, - { - operationType: BulkOperationType.Upsert, - resourceBody: { id: upsertItemId, key: "A", school: "elementary" }, - }, - { - operationType: BulkOperationType.Replace, - id: replaceItemId, - resourceBody: { id: replaceItemId, key: "A", school: "junior high" }, - }, - { - operationType: BulkOperationType.Delete, - id: deleteItemId, - }, - { - operationType: BulkOperationType.Patch, - id: patchItemId, - resourceBody: { - operations: [{ op: PatchOperationType.add, path: "/good", value: "greatValue" }], - condition: "from c where NOT IS_DEFINED(c.newImproved)", - }, - }, - ]; - - const response = await container.items.batch(operations, "A"); - assert(isOperationResponse(response.result[0])); - assert.strictEqual(response.result[0].statusCode, 201); - assert.strictEqual(response.result[1].statusCode, 201); - assert.strictEqual(response.result[2].statusCode, 200); - assert.strictEqual(response.result[3].statusCode, 204); - assert.strictEqual(response.result[4].statusCode, 200); - }); - it("rolls back prior operations when one fails", async function () { - const operations: OperationInput[] = [ - { - operationType: BulkOperationType.Upsert, - resourceBody: { id: otherItemId, key: "A", school: "elementary" }, - }, - { - operationType: BulkOperationType.Delete, - id: 
deleteItemId + addEntropy("make this 404"), - }, - ]; + }, + ]; - const deleteResponse = await container.items.batch(operations, "A"); - assert.strictEqual(deleteResponse.result[0].statusCode, 424); - assert.strictEqual(deleteResponse.result[1].statusCode, 404); - const { resource: readItem } = await container.item(otherItemId).read(); - assert.strictEqual(readItem, undefined); - assert(isOperationResponse(deleteResponse.result[0])); - }); + const response = await container.items.batch(operations, "A"); + assert(isOperationResponse(response.result[0])); + assert.strictEqual(response.result[0].statusCode, 201); + assert.strictEqual(response.result[1].statusCode, 201); + assert.strictEqual(response.result[2].statusCode, 200); + assert.strictEqual(response.result[3].statusCode, 204); + assert.strictEqual(response.result[4].statusCode, 200); + }); + it("rolls back prior operations when one fails", async function () { + const operations: OperationInput[] = [ + { + operationType: BulkOperationType.Upsert, + resourceBody: { id: otherItemId, key: "A", school: "elementary" }, + }, + { + operationType: BulkOperationType.Delete, + id: deleteItemId + addEntropy("make this 404"), + }, + ]; - function isOperationResponse(object: unknown): object is OperationResponse { - return ( - typeof object === "object" && - object !== null && - Object.prototype.hasOwnProperty.call(object, "statusCode") && - Object.prototype.hasOwnProperty.call(object, "requestCharge") - ); - } + const deleteResponse = await container.items.batch(operations, "A"); + assert.strictEqual(deleteResponse.result[0].statusCode, 424); + assert.strictEqual(deleteResponse.result[1].statusCode, 404); + const { resource: readItem } = await container.item(otherItemId).read(); + assert.strictEqual(readItem, undefined); + assert(isOperationResponse(deleteResponse.result[0])); }); + + function isOperationResponse(object: unknown): object is OperationResponse { + return ( + typeof object === "object" && + object !== null && + Object.prototype.hasOwnProperty.call(object, "statusCode") && + Object.prototype.hasOwnProperty.call(object, "requestCharge") + ); + } }); }); - describe("patch operations", function () { describe("various mixed operations", function () { let container: Container; diff --git a/sdk/cosmosdb/cosmos/tsconfig.strict.json b/sdk/cosmosdb/cosmos/tsconfig.strict.json index 50c610202437..30c74085188c 100644 --- a/sdk/cosmosdb/cosmos/tsconfig.strict.json +++ b/sdk/cosmosdb/cosmos/tsconfig.strict.json @@ -125,7 +125,6 @@ "src/routing/index.ts", "src/utils/SasToken.ts", "src/utils/tracing.ts", - "src/utils/hashing", "src/client/SasToken/SasTokenProperties.ts", "src/client/SasToken/PermissionScopeValues.ts", "test/public/common/TestHelpers.ts",
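Stepping back from the diff: the batch tests restored above rely on `Items.batch` being transactional, unlike `Items.bulk`. All operations share one partition key and either all commit or all roll back, which is why a failing delete turns its sibling upsert into a 424 in the "rolls back prior operations" test. A minimal sketch of that behavior (the item ids are illustrative):

```ts
import { BulkOperationType, Container, OperationInput } from "@azure/cosmos";

// Illustrative transactional batch: one partition key, all-or-nothing.
async function atomicUpsertThenDelete(container: Container): Promise<void> {
  const operations: OperationInput[] = [
    {
      operationType: BulkOperationType.Upsert,
      resourceBody: { id: "student1", key: "A", school: "elementary" },
    },
    {
      operationType: BulkOperationType.Delete,
      id: "no-such-item", // forces a 404 ...
    },
  ];
  const response = await container.items.batch(operations, "A");
  // ... so the whole batch rolls back and the upsert reports 424.
  console.log(response.result?.map((r) => r.statusCode)); // [424, 404]
}
```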